From 0eadb4d1921d50a4a0e373551ef6d6b987ad6e1b Mon Sep 17 00:00:00 2001
From: Hojjat Jafarpour
Date: Wed, 16 Aug 2017 14:14:19 -0700
Subject: [PATCH] Starting a clean ksql repo by removing all the previous
 commit history. This will be the repo that will be made public.

---
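Note: the subject line describes collapsing all prior history into this single commit. The patch itself does not record how that was done; a minimal sketch of one common way to produce a single-commit history is via an orphan branch (the branch and remote names below are illustrative, not taken from this patch):

    git checkout --orphan clean-master   # new branch with no parent commits
    git add -A                           # stage the entire working tree
    git commit -m "Starting a clean ksql repo"
    git branch -M clean-master master    # replace master with the orphan branch
    git push --force origin master       # publish the single-commit history
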
 .gitignore | 23 + LICENSE | 201 +++ README.md | 130 ++ bin/ksql-cli | 17 + bin/ksql-cli-stop | 23 + bin/ksql-datagen | 17 + bin/ksql-node | 17 + bin/ksql-run-class | 144 ++ bin/ksql-server-start | 20 + bin/ksql-server-stop | 17 + checkstyle/suppressions.xml | 37 + config/ksqlserver.properties | 5 + config/log4j-rolling.properties | 42 + config/log4j-silent.properties | 1 + config/log4j.properties | 16 + docker/README.md | 14 + ksql-cli/Dockerfile | 11 + ksql-cli/pom.xml | 192 +++ ksql-cli/src/assembly/development.xml | 51 + ksql-cli/src/assembly/package.xml | 55 + ksql-cli/src/assembly/standalone.xml | 45 + .../src/main/java/io/confluent/ksql/Ksql.java | 46 + .../main/java/io/confluent/ksql/cli/Cli.java | 551 +++++++ .../java/io/confluent/ksql/cli/LocalCli.java | 61 + .../java/io/confluent/ksql/cli/RemoteCli.java | 68 + .../ksql/cli/StandaloneExecutor.java | 55 + .../cli/commands/AbstractCliCommands.java | 77 + .../io/confluent/ksql/cli/commands/Local.java | 156 ++ .../confluent/ksql/cli/commands/Remote.java | 87 + .../ksql/cli/commands/Standalone.java | 94 ++ .../ksql/cli/console/CliSpecificCommand.java | 15 + .../confluent/ksql/cli/console/Console.java | 520 ++++++ .../ksql/cli/console/JLineReader.java | 90 ++ .../ksql/cli/console/JLineTerminal.java | 68 + .../ksql/cli/console/LineReader.java | 15 + .../ksql/cli/console/OutputFormat.java | 27 + .../java/io/confluent/ksql/util/CliUtils.java | 149 ++ .../ksql/util/TimestampLogFileAppender.java | 23 + ksql-cli/src/main/resources/log4j.properties | 11 + .../test/java/io/confluent/ksql/CliTest.java | 359 +++++ .../ksql/CliTestFailedException.java | 13 + .../java/io/confluent/ksql/FakeException.java | 18 + .../io/confluent/ksql/TestLineReader.java | 24 + .../java/io/confluent/ksql/TestResult.java | 122 ++ .../java/io/confluent/ksql/TestRunner.java | 64 + .../java/io/confluent/ksql/TestTerminal.java | 92 ++ .../ksql/cli/console/ConsoleTest.java | 123 ++ ksql-core/pom.xml | 188 +++ .../io/confluent/ksql/parser/SqlBase.g4 | 745 +++++++++ .../java/io/confluent/ksql/KsqlContext.java | 96 ++ .../java/io/confluent/ksql/KsqlEngine.java | 388 +++++ .../java/io/confluent/ksql/QueryEngine.java | 413 +++++ .../ksql/analyzer/AggregateAnalysis.java | 61 + .../ksql/analyzer/AggregateAnalyzer.java | 81 + .../io/confluent/ksql/analyzer/Analysis.java | 155 ++ .../ksql/analyzer/AnalysisContext.java | 45 + .../io/confluent/ksql/analyzer/Analyzer.java | 533 ++++++ .../ksql/analyzer/ExpressionAnalyzer.java | 131 ++ .../java/io/confluent/ksql/ddl/DdlConfig.java | 21 + .../commands/AbstractCreateStreamCommand.java | 194 +++ .../ddl/commands/CreateStreamCommand.java | 39 + .../ksql/ddl/commands/CreateTableCommand.java | 57 + .../ksql/ddl/commands/DDLCommand.java | 13 + .../ksql/ddl/commands/DDLCommandExec.java | 56 + .../ksql/ddl/commands/DDLCommandResult.java | 28 + .../ksql/ddl/commands/DropSourceCommand.java | 33 + .../ksql/ddl/commands/DropTopicCommand.java | 28 + .../ddl/commands/RegisterTopicCommand.java | 111 ++ .../ksql/exception/ExceptionUtil.java | 27 + .../KafkaResponseGetFailedException.java | 15 + .../exception/KafkaTopicClientException.java | 15 + .../ksql/exception/KafkaTopicException.java | 15 + .../ksql/exception/ParseFailedException.java | 17 + 
.../function/KsqlAggFunctionDeterminer.java | 33 + .../ksql/function/KsqlAggregateFunction.java | 68 + .../confluent/ksql/function/KsqlFunction.java | 42 + .../ksql/function/KsqlFunctionException.java | 18 + .../ksql/function/KsqlFunctions.java | 186 +++ .../ksql/function/udaf/KudafAggregator.java | 83 + .../ksql/function/udaf/KudafInitializer.java | 30 + .../count/CountAggFunctionDeterminer.java | 24 + .../ksql/function/udaf/count/CountKudaf.java | 34 + .../function/udaf/max/DoubleMaxKudaf.java | 42 + .../ksql/function/udaf/max/LongMaxKudaf.java | 42 + .../udaf/max/MaxAggFunctionDeterminer.java | 33 + .../function/udaf/min/DoubleMinKudaf.java | 42 + .../ksql/function/udaf/min/LongMinKudaf.java | 42 + .../udaf/min/MinAggFunctionDeterminer.java | 33 + .../function/udaf/sum/DoubleSumKudaf.java | 37 + .../ksql/function/udaf/sum/LongSumKudaf.java | 34 + .../udaf/sum/SumAggFunctionDeterminer.java | 34 + .../io/confluent/ksql/function/udf/Kudf.java | 12 + .../udf/datetime/StringToTimestamp.java | 38 + .../udf/datetime/TimestampToString.java | 38 + .../udf/json/JsonExtractStringKudf.java | 61 + .../ksql/function/udf/math/AbsKudf.java | 23 + .../ksql/function/udf/math/CeilKudf.java | 23 + .../ksql/function/udf/math/FloorKudf.java | 23 + .../ksql/function/udf/math/RandomKudf.java | 24 + .../ksql/function/udf/math/RoundKudf.java | 23 + .../ksql/function/udf/string/ConcatKudf.java | 25 + .../ksql/function/udf/string/IfNullKudf.java | 28 + .../ksql/function/udf/string/LCaseKudf.java | 24 + .../ksql/function/udf/string/LenKudf.java | 24 + .../function/udf/string/SubstringKudf.java | 31 + .../ksql/function/udf/string/TrimKudf.java | 24 + .../ksql/function/udf/string/UCaseKudf.java | 24 + .../ksql/function/udf/util/CastKudf.java | 26 + .../confluent/ksql/metastore/DataSource.java | 21 + .../confluent/ksql/metastore/KsqlStdOut.java | 48 + .../confluent/ksql/metastore/KsqlStream.java | 43 + .../confluent/ksql/metastore/KsqlTable.java | 57 + .../confluent/ksql/metastore/KsqlTopic.java | 57 + .../confluent/ksql/metastore/MetaStore.java | 35 + .../ksql/metastore/MetaStoreImpl.java | 113 ++ .../ksql/metastore/MetastoreUtil.java | 351 ++++ .../ksql/metastore/StructuredDataSource.java | 74 + .../io/confluent/ksql/parser/AstBuilder.java | 1430 +++++++++++++++++ .../ksql/parser/CaseInsensitiveStream.java | 72 + .../parser/CodegenExpressionFormatter.java | 470 ++++++ .../ksql/parser/ExpressionFormatter.java | 553 +++++++ .../io/confluent/ksql/parser/KsqlParser.java | 117 ++ .../ksql/parser/KsqlParserErrorStrategy.java | 85 + .../ksql/parser/ParsingException.java | 50 + .../confluent/ksql/parser/SqlFormatter.java | 696 ++++++++ .../rewrite/AggregateExpressionRewriter.java | 39 + .../ExpressionFormatterQueryRewrite.java | 553 +++++++ .../rewrite/SqlFormatterQueryRewrite.java | 724 +++++++++ .../tree/AbstractStreamCreateStatement.java | 21 + .../tree/AbstractStreamDropStatement.java | 15 + .../ksql/parser/tree/AliasedRelation.java | 88 + .../ksql/parser/tree/AllColumns.java | 77 + .../tree/ArithmeticBinaryExpression.java | 88 + .../tree/ArithmeticUnaryExpression.java | 88 + .../ksql/parser/tree/AstVisitor.java | 397 +++++ .../ksql/parser/tree/BetweenPredicate.java | 76 + .../ksql/parser/tree/BinaryLiteral.java | 84 + .../ksql/parser/tree/BooleanLiteral.java | 66 + .../io/confluent/ksql/parser/tree/Cast.java | 98 ++ .../parser/tree/ComparisonExpression.java | 136 ++ .../ksql/parser/tree/CreateStream.java | 95 ++ .../parser/tree/CreateStreamAsSelect.java | 101 ++ .../ksql/parser/tree/CreateTable.java | 97 ++ 
.../ksql/parser/tree/CreateTableAsSelect.java | 99 ++ .../ksql/parser/tree/CreateView.java | 81 + .../ksql/parser/tree/DecimalLiteral.java | 56 + .../ksql/parser/tree/DefaultAstVisitor.java | 393 +++++ .../DefaultExpressionTraversalVisitor.java | 19 + .../parser/tree/DefaultTraversalVisitor.java | 374 +++++ .../io/confluent/ksql/parser/tree/Delete.java | 72 + .../parser/tree/DereferenceExpression.java | 93 ++ .../ksql/parser/tree/DoubleLiteral.java | 64 + .../ksql/parser/tree/DropStream.java | 70 + .../confluent/ksql/parser/tree/DropTable.java | 69 + .../confluent/ksql/parser/tree/DropTopic.java | 70 + .../confluent/ksql/parser/tree/DropView.java | 70 + .../ksql/parser/tree/EmptyStatement.java | 45 + .../io/confluent/ksql/parser/tree/Except.java | 84 + .../ksql/parser/tree/ExistsPredicate.java | 57 + .../confluent/ksql/parser/tree/Explain.java | 88 + .../ksql/parser/tree/ExplainFormat.java | 63 + .../ksql/parser/tree/ExplainOption.java | 20 + .../ksql/parser/tree/ExplainType.java | 63 + .../ksql/parser/tree/ExportCatalog.java | 45 + .../ksql/parser/tree/Expression.java | 42 + .../ksql/parser/tree/ExpressionRewriter.java | 151 ++ .../parser/tree/ExpressionTreeRewriter.java | 668 ++++++++ .../confluent/ksql/parser/tree/Extract.java | 88 + .../ksql/parser/tree/FieldReference.java | 50 + .../ksql/parser/tree/FrameBound.java | 91 ++ .../ksql/parser/tree/FunctionCall.java | 101 ++ .../ksql/parser/tree/GenericLiteral.java | 75 + .../confluent/ksql/parser/tree/GroupBy.java | 76 + .../ksql/parser/tree/GroupingElement.java | 24 + .../ksql/parser/tree/GroupingSets.java | 76 + .../parser/tree/HoppingWindowExpression.java | 80 + .../ksql/parser/tree/InListExpression.java | 55 + .../ksql/parser/tree/InPredicate.java | 61 + .../confluent/ksql/parser/tree/Intersect.java | 70 + .../ksql/parser/tree/IntervalLiteral.java | 113 ++ .../ksql/parser/tree/IsNotNullPredicate.java | 57 + .../ksql/parser/tree/IsNullPredicate.java | 57 + .../io/confluent/ksql/parser/tree/Join.java | 103 ++ .../ksql/parser/tree/JoinCriteria.java | 18 + .../io/confluent/ksql/parser/tree/JoinOn.java | 48 + .../confluent/ksql/parser/tree/JoinUsing.java | 54 + .../parser/tree/KsqlWindowExpression.java | 14 + .../ksql/parser/tree/LambdaExpression.java | 64 + .../ksql/parser/tree/LikePredicate.java | 75 + .../ksql/parser/tree/ListProperties.java | 33 + .../ksql/parser/tree/ListQueries.java | 33 + .../parser/tree/ListRegisteredTopics.java | 33 + .../ksql/parser/tree/ListStreams.java | 34 + .../ksql/parser/tree/ListTables.java | 32 + .../ksql/parser/tree/ListTopics.java | 33 + .../confluent/ksql/parser/tree/Literal.java | 20 + .../ksql/parser/tree/LoadProperties.java | 33 + .../parser/tree/LogicalBinaryExpression.java | 99 ++ .../ksql/parser/tree/LongLiteral.java | 67 + .../ksql/parser/tree/NaturalJoin.java | 29 + .../io/confluent/ksql/parser/tree/Node.java | 39 + .../ksql/parser/tree/NodeLocation.java | 24 + .../ksql/parser/tree/NotExpression.java | 57 + .../ksql/parser/tree/NullIfExpression.java | 64 + .../ksql/parser/tree/NullLiteral.java | 41 + .../ksql/parser/tree/PrintTopic.java | 81 + .../ksql/parser/tree/QualifiedName.java | 94 ++ .../parser/tree/QualifiedNameReference.java | 58 + .../io/confluent/ksql/parser/tree/Query.java | 108 ++ .../confluent/ksql/parser/tree/QueryBody.java | 20 + .../ksql/parser/tree/QuerySpecification.java | 166 ++ .../ksql/parser/tree/RegisterTopic.java | 88 + .../confluent/ksql/parser/tree/Relation.java | 20 + .../ksql/parser/tree/RenameColumn.java | 80 + .../ksql/parser/tree/RenameTable.java | 71 + 
.../io/confluent/ksql/parser/tree/Row.java | 59 + .../confluent/ksql/parser/tree/RunScript.java | 45 + .../ksql/parser/tree/SampledRelation.java | 113 ++ .../parser/tree/SearchedCaseExpression.java | 70 + .../io/confluent/ksql/parser/tree/Select.java | 76 + .../ksql/parser/tree/SelectItem.java | 15 + .../parser/tree/SessionWindowExpression.java | 60 + .../ksql/parser/tree/SetOperation.java | 30 + .../ksql/parser/tree/SetProperty.java | 66 + .../ksql/parser/tree/SetSession.java | 70 + .../ksql/parser/tree/ShowCatalogs.java | 56 + .../ksql/parser/tree/ShowColumns.java | 69 + .../ksql/parser/tree/ShowCreate.java | 75 + .../ksql/parser/tree/ShowFunctions.java | 48 + .../ksql/parser/tree/ShowPartitions.java | 94 ++ .../ksql/parser/tree/ShowSchemas.java | 71 + .../ksql/parser/tree/ShowSession.java | 48 + .../parser/tree/SimpleCaseExpression.java | 79 + .../ksql/parser/tree/SimpleGroupBy.java | 74 + .../ksql/parser/tree/SingleColumn.java | 108 ++ .../confluent/ksql/parser/tree/SortItem.java | 89 + .../ksql/parser/tree/StackableAstVisitor.java | 52 + .../confluent/ksql/parser/tree/Statement.java | 20 + .../ksql/parser/tree/Statements.java | 54 + .../ksql/parser/tree/StringLiteral.java | 66 + .../ksql/parser/tree/SubqueryExpression.java | 54 + .../ksql/parser/tree/SubscriptExpression.java | 63 + .../ksql/parser/tree/SymbolReference.java | 45 + .../io/confluent/ksql/parser/tree/Table.java | 84 + .../ksql/parser/tree/TableElement.java | 71 + .../ksql/parser/tree/TableSubquery.java | 63 + .../ksql/parser/tree/TerminateQuery.java | 49 + .../ksql/parser/tree/TimeLiteral.java | 57 + .../ksql/parser/tree/TimestampLiteral.java | 58 + .../parser/tree/TumblingWindowExpression.java | 60 + .../io/confluent/ksql/parser/tree/Union.java | 70 + .../ksql/parser/tree/UnsetProperty.java | 48 + .../io/confluent/ksql/parser/tree/Values.java | 65 + .../ksql/parser/tree/WhenClause.java | 61 + .../io/confluent/ksql/parser/tree/Window.java | 62 + .../ksql/parser/tree/WindowExpression.java | 80 + .../ksql/parser/tree/WindowFrame.java | 84 + .../ksql/parser/tree/WindowName.java | 49 + .../io/confluent/ksql/parser/tree/With.java | 78 + .../confluent/ksql/parser/tree/WithQuery.java | 83 + .../confluent/ksql/physical/GenericRow.java | 71 + .../ksql/physical/PhysicalPlanBuilder.java | 693 ++++++++ .../ksql/planner/DefaultTraversalVisitor.java | 432 +++++ .../ksql/planner/LogicalPlanner.java | 179 +++ .../confluent/ksql/planner/PlanException.java | 13 + .../ksql/planner/plan/AggregateNode.java | 142 ++ .../ksql/planner/plan/FilterNode.java | 67 + .../confluent/ksql/planner/plan/JoinNode.java | 115 ++ .../ksql/planner/plan/KsqlBareOutputNode.java | 34 + .../plan/KsqlStructuredDataOutputNode.java | 65 + .../ksql/planner/plan/OutputNode.java | 64 + .../confluent/ksql/planner/plan/PlanNode.java | 47 + .../ksql/planner/plan/PlanNodeId.java | 53 + .../ksql/planner/plan/PlanVisitor.java | 33 + .../ksql/planner/plan/ProjectNode.java | 95 ++ .../ksql/planner/plan/SourceNode.java | 36 + .../plan/StructuredDataSourceNode.java | 85 + .../confluent/ksql/serde/KsqlTopicSerDe.java | 20 + .../ksql/serde/avro/KsqlAvroTopicSerDe.java | 22 + .../avro/KsqlGenericRowAvroDeserializer.java | 133 ++ .../avro/KsqlGenericRowAvroSerializer.java | 85 + .../delimited/KsqlDelimitedDeserializer.java | 96 ++ .../delimited/KsqlDelimitedSerializer.java | 49 + .../delimited/KsqlDelimitedTopicSerDe.java | 16 + .../ksql/serde/json/KsqlJsonDeserializer.java | 140 ++ .../ksql/serde/json/KsqlJsonSerializer.java | 65 + .../ksql/serde/json/KsqlJsonTopicSerDe.java | 22 + 
.../ksql/structured/QueuedSchemaKStream.java | 114 ++ .../ksql/structured/SchemaKGroupedStream.java | 125 ++ .../ksql/structured/SchemaKStream.java | 326 ++++ .../ksql/structured/SchemaKTable.java | 194 +++ .../ksql/structured/SqlPredicate.java | 159 ++ .../ksql/util/DataSourceExtractor.java | 216 +++ .../ksql/util/ExpressionMetadata.java | 41 + .../ksql/util/ExpressionTypeManager.java | 197 +++ .../confluent/ksql/util/ExpressionUtil.java | 190 +++ .../util/GenericRowValueTypeEnforcer.java | 133 ++ .../confluent/ksql/util/KafkaTopicClient.java | 56 + .../ksql/util/KafkaTopicClientImpl.java | 82 + .../io/confluent/ksql/util/KsqlConfig.java | 164 ++ .../io/confluent/ksql/util/KsqlException.java | 18 + .../ksql/util/KsqlPreconditions.java | 25 + .../java/io/confluent/ksql/util/Pair.java | 24 + .../ksql/util/PersistentQueryMetadata.java | 45 + .../io/confluent/ksql/util/QueryMetadata.java | 67 + .../ksql/util/QueuedQueryMetadata.java | 51 + .../io/confluent/ksql/util/SchemaUtil.java | 216 +++ .../io/confluent/ksql/util/SerDeUtil.java | 86 + .../io/confluent/ksql/util/StringUtil.java | 35 + .../java/io/confluent/ksql/util/Version.java | 33 + .../io/confluent/ksql/util/WindowedSerde.java | 49 + .../ksql/util/json/JsonPathTokenizer.java | 179 +++ .../timestamp/KsqlTimestampExtractor.java | 48 + ksql-core/src/main/resources/checkstyle.xml | 84 + .../main/resources/ksql-version.properties | 1 + ksql-core/src/main/resources/log4j.properties | 5 + .../ksql/analyzer/AggregateAnalyzerTest.java | 213 +++ .../confluent/ksql/analyzer/AnalyzerTest.java | 188 +++ .../ksql/integtests/json/JsonFormatTest.java | 606 +++++++ .../ksql/metastore/MetastoreTest.java | 62 + .../ksql/metastore/MetastoreUtilTest.java | 131 ++ .../confluent/ksql/parser/KsqlParserTest.java | 580 +++++++ .../physical/PhysicalPlanBuilderTest.java | 177 ++ .../ksql/planner/LogicalPlannerTest.java | 157 ++ .../ksql/structured/SchemaKStreamTest.java | 178 ++ .../ksql/structured/SchemaKTableTest.java | 182 +++ .../ksql/structured/SqlPredicateTest.java | 118 ++ .../EmbeddedSingleNodeKafkaCluster.java | 150 ++ .../ksql/testutils/IntegrationTestUtils.java | 198 +++ .../ksql/testutils/KafkaEmbedded.java | 163 ++ .../ksql/testutils/ZooKeeperEmbedded.java | 56 + .../ksql/util/ExpressionTypeManagerTest.java | 106 ++ .../ksql/util/ExpressionUtilTest.java | 180 +++ .../ksql/util/FakeKafkaTopicClient.java | 43 + .../io/confluent/ksql/util/KsqlTestUtil.java | 106 ++ .../ksql/util/OrderDataProvider.java | 106 ++ .../confluent/ksql/util/SchemaUtilTest.java | 30 + .../confluent/ksql/util/TestDataProvider.java | 52 + .../io/confluent/ksql/util/TopicConsumer.java | 72 + .../io/confluent/ksql/util/TopicProducer.java | 76 + .../ksql/util/json/JsonPathTokenizerTest.java | 25 + ksql-core/src/test/resources/TestCatalog.json | 92 ++ .../src/test/resources/avro_order_schema.avro | 11 + ksql-examples/Dockerfile | 11 + ksql-examples/README.md | 53 + .../examples/clickstream-analysis/README.md | 167 ++ .../clickstream-analysis-dashboard.json | 978 +++++++++++ .../clickstream-analysis-dashboard.sh | 13 + .../clickstream-schema.sql | 117 ++ .../connect-config/README.md | 25 + .../connect-config/connect.properties | 2 + .../null-filter-4.0.0-SNAPSHOT.jar | Bin 0 -> 3119 bytes .../elastic-dynamic-template.sh | 36 + .../ksql-connect-es-grafana.sh | 62 + .../ksql-tables-to-grafana.sh | 51 + .../malicious-users-dashboard.json | 203 +++ .../malicious-users-dashboard.sh | 13 + .../clickstream-analysis/run-grafana.sh | 2 + ksql-examples/pom.xml | 149 ++ 
ksql-examples/src/assembly/development.xml | 51 + ksql-examples/src/assembly/package.xml | 52 + ksql-examples/src/assembly/standalone.xml | 30 + .../avro/random/generator/Generator.java | 1379 ++++++++++++++++ .../confluent/avro/random/generator/Main.java | 299 ++++ .../confluent/ksql/datagen/AvroConsumer.java | 142 ++ .../confluent/ksql/datagen/AvroProducer.java | 28 + .../io/confluent/ksql/datagen/DataGen.java | 344 ++++ .../ksql/datagen/DataGenProducer.java | 195 +++ .../ksql/datagen/DelimitedConsumer.java | 82 + .../ksql/datagen/DelimitedProducer.java | 21 + .../confluent/ksql/datagen/JsonConsumer.java | 249 +++ .../confluent/ksql/datagen/JsonProducer.java | 21 + .../ksql/datagen/SessionManager.java | 139 ++ .../confluent/ksql/embedded/EmbeddedKsql.java | 30 + .../src/main/resources/SampleQueries.sql | 26 + .../resources/clickstream_codes_schema.avro | 43 + .../main/resources/clickstream_schema.avro | 159 ++ .../resources/clickstream_users_schema.avro | 85 + .../src/main/resources/ksql-server.properties | 9 + .../src/main/resources/orders_schema.avro | 39 + .../src/main/resources/pageviews_schema.avro | 28 + .../src/main/resources/users_schema.avro | 38 + .../ksql/datagen/SessionManagerTest.java | 137 ++ ksql-rest-app/pom.xml | 133 ++ ksql-rest-app/src/assembly/development.xml | 41 + ksql-rest-app/src/assembly/package.xml | 54 + ksql-rest-app/src/assembly/standalone.xml | 45 + .../ksql/rest/client/KsqlRestClient.java | 229 +++ .../ksql/rest/client/RestResponse.java | 120 ++ .../ksql/rest/entity/CommandStatus.java | 58 + .../ksql/rest/entity/CommandStatusEntity.java | 99 ++ .../ksql/rest/entity/CommandStatuses.java | 34 + .../ksql/rest/entity/ErrorMessage.java | 78 + .../ksql/rest/entity/ErrorMessageEntity.java | 53 + .../ksql/rest/entity/ExecutionPlan.java | 44 + .../ksql/rest/entity/KafkaTopicInfo.java | 63 + .../ksql/rest/entity/KafkaTopicsList.java | 134 ++ .../ksql/rest/entity/KsqlEntity.java | 37 + .../ksql/rest/entity/KsqlEntityList.java | 25 + .../ksql/rest/entity/KsqlRequest.java | 53 + .../ksql/rest/entity/KsqlTopicInfo.java | 68 + .../ksql/rest/entity/KsqlTopicsList.java | 61 + .../ksql/rest/entity/PropertiesList.java | 49 + .../confluent/ksql/rest/entity/Queries.java | 96 ++ .../ksql/rest/entity/SchemaMapper.java | 95 ++ .../ksql/rest/entity/ServerInfo.java | 44 + .../ksql/rest/entity/SourceDescription.java | 149 ++ .../ksql/rest/entity/StreamedRow.java | 62 + .../ksql/rest/entity/StreamsList.java | 118 ++ .../ksql/rest/entity/TablesList.java | 124 ++ .../ksql/rest/entity/TopicDescription.java | 73 + .../ksql/rest/server/CliOptions.java | 81 + .../ksql/rest/server/KsqlRestApplication.java | 319 ++++ .../ksql/rest/server/KsqlRestConfig.java | 122 ++ .../ksql/rest/server/StatementParser.java | 31 + .../ksql/rest/server/computation/Command.java | 52 + .../rest/server/computation/CommandId.java | 72 + .../server/computation/CommandIdAssigner.java | 114 ++ .../server/computation/CommandRunner.java | 107 ++ .../rest/server/computation/CommandStore.java | 199 +++ .../server/computation/StatementExecutor.java | 449 ++++++ .../server/resources/KsqlExceptionMapper.java | 25 + .../rest/server/resources/KsqlResource.java | 395 +++++ .../server/resources/ServerInfoResource.java | 29 + .../rest/server/resources/StatusResource.java | 49 + .../resources/streaming/QueryRowWriter.java | 64 + .../streaming/QueryStreamWriter.java | 130 ++ .../streaming/StreamedQueryResource.java | 88 + .../streaming/TopicStreamWriter.java | 162 ++ .../io/confluent/ksql/rest/quickstart.html | 438 +++++ 
.../src/main/resources/log4j.properties | 5 + .../ksql/rest/client/KsqlRestClientTest.java | 80 + .../ksql/rest/server/KsqlRestConfigTest.java | 80 + .../server/computation/CommandRunnerTest.java | 93 ++ .../computation/StatementExecutorTest.java | 126 ++ .../rest/server/mock/MockApplication.java | 21 + .../rest/server/mock/MockCommandStore.java | 83 + .../server/mock/MockKafkaTopicClient.java | 44 + .../ksql/rest/server/mock/MockKsqkEngine.java | 13 + .../rest/server/mock/MockKsqlResources.java | 29 + .../rest/server/mock/MockStatusResource.java | 38 + .../mock/MockStreamedQueryResource.java | 39 + .../server/resources/KsqlResourceTest.java | 350 ++++ .../server/resources/StatusResourceTest.java | 89 + .../resources/StreamedQueryResourceTest.java | 221 +++ .../ksql/rest/server/utils/TestUtils.java | 62 + licenses/LICENSE-annotations-3.0.1.txt | 202 +++ licenses/LICENSE-antlr4-runtime-4.7.txt | 148 ++ licenses/LICENSE-avro-1.8.1.txt | 202 +++ ...random-generator-0.1-20170531.181813-9.txt | 202 +++ licenses/LICENSE-commons-collections4-4.0.txt | 202 +++ licenses/LICENSE-commons-compress-1.8.1.txt | 201 +++ licenses/LICENSE-commons-csv-1.4.txt | 202 +++ licenses/LICENSE-commons-lang3-3.3.2.txt | 202 +++ licenses/LICENSE-generex-1.0.1.txt | 202 +++ .../LICENSE-jackson-annotations-2.8.0.txt | 8 + licenses/LICENSE-jackson-core-2.8.4.txt | 8 + licenses/LICENSE-jackson-core-2.8.5.txt | 8 + licenses/LICENSE-jackson-core-asl-1.9.13.txt | 13 + licenses/LICENSE-jackson-databind-2.8.4.txt | 8 + licenses/LICENSE-jackson-databind-2.8.5.txt | 8 + .../LICENSE-jackson-mapper-asl-1.9.13.txt | 13 + licenses/LICENSE-jline-3.3.1.txt | 7 + licenses/LICENSE-jol-core-0.2.txt | 347 ++++ licenses/LICENSE-jopt-simple-5.0.4.txt | 7 + licenses/LICENSE-jsr305-3.0.2.txt | 202 +++ licenses/LICENSE-log4j-1.2.17.txt | 202 +++ licenses/LICENSE-netty-3.7.0.Final.txt | 421 +++++ licenses/LICENSE-snappy-java-1.1.1.3.txt | 202 +++ licenses/LICENSE-snappy-java-1.1.4.txt | 202 +++ licenses/LICENSE-zookeeper-3.4.8.txt | 202 +++ licenses/licenses.html | 129 ++ notices/NOTICE-avro-1.8.1.txt | 8 + notices/NOTICE-jackson-core-2.8.5.txt | 20 + notices/NOTICE-jackson-core-asl-1.9.13.txt | 7 + notices/NOTICE-jackson-databind-2.8.5.txt | 20 + notices/NOTICE-jackson-mapper-asl-1.9.13.txt | 7 + notices/NOTICE-log4j-1.2.17.txt | 5 + pom.xml | 232 +++ quickstart/docker-compose.yml | 131 ++ quickstart/ksql-quickstart-schemas.jpg | Bin 0 -> 59921 bytes quickstart/quickstart-docker.rst | 129 ++ quickstart/quickstart-non-docker.rst | 130 ++ quickstart/quickstart.rst | 209 +++ 484 files changed, 51195 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100755 bin/ksql-cli create mode 100755 bin/ksql-cli-stop create mode 100755 bin/ksql-datagen create mode 100755 bin/ksql-node create mode 100755 bin/ksql-run-class create mode 100755 bin/ksql-server-start create mode 100755 bin/ksql-server-stop create mode 100644 checkstyle/suppressions.xml create mode 100644 config/ksqlserver.properties create mode 100644 config/log4j-rolling.properties create mode 100644 config/log4j-silent.properties create mode 100644 config/log4j.properties create mode 100644 docker/README.md create mode 100644 ksql-cli/Dockerfile create mode 100644 ksql-cli/pom.xml create mode 100644 ksql-cli/src/assembly/development.xml create mode 100644 ksql-cli/src/assembly/package.xml create mode 100644 ksql-cli/src/assembly/standalone.xml create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/Ksql.java create mode 100644 
ksql-cli/src/main/java/io/confluent/ksql/cli/Cli.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/LocalCli.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/RemoteCli.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/StandaloneExecutor.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/commands/AbstractCliCommands.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Local.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Remote.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Standalone.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/console/CliSpecificCommand.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/console/JLineReader.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/console/JLineTerminal.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/console/LineReader.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/cli/console/OutputFormat.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/util/CliUtils.java create mode 100644 ksql-cli/src/main/java/io/confluent/ksql/util/TimestampLogFileAppender.java create mode 100644 ksql-cli/src/main/resources/log4j.properties create mode 100644 ksql-cli/src/test/java/io/confluent/ksql/CliTest.java create mode 100644 ksql-cli/src/test/java/io/confluent/ksql/CliTestFailedException.java create mode 100644 ksql-cli/src/test/java/io/confluent/ksql/FakeException.java create mode 100644 ksql-cli/src/test/java/io/confluent/ksql/TestLineReader.java create mode 100644 ksql-cli/src/test/java/io/confluent/ksql/TestResult.java create mode 100644 ksql-cli/src/test/java/io/confluent/ksql/TestRunner.java create mode 100644 ksql-cli/src/test/java/io/confluent/ksql/TestTerminal.java create mode 100644 ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java create mode 100644 ksql-core/pom.xml create mode 100644 ksql-core/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 create mode 100644 ksql-core/src/main/java/io/confluent/ksql/KsqlContext.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/KsqlEngine.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/QueryEngine.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/analyzer/AggregateAnalysis.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/analyzer/AggregateAnalyzer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/analyzer/Analysis.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/analyzer/AnalysisContext.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/analyzer/Analyzer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/analyzer/ExpressionAnalyzer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/DdlConfig.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/commands/AbstractCreateStreamCommand.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/commands/CreateStreamCommand.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/commands/CreateTableCommand.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommand.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommandExec.java create mode 100644 
ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommandResult.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DropSourceCommand.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DropTopicCommand.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/ddl/commands/RegisterTopicCommand.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/exception/ExceptionUtil.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/exception/KafkaResponseGetFailedException.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/exception/KafkaTopicClientException.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/exception/KafkaTopicException.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/exception/ParseFailedException.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/KsqlAggFunctionDeterminer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/KsqlAggregateFunction.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunction.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunctionException.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunctions.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/KudafAggregator.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/KudafInitializer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/count/CountAggFunctionDeterminer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/count/CountKudaf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/DoubleMaxKudaf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/LongMaxKudaf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/MaxAggFunctionDeterminer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/DoubleMinKudaf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/LongMinKudaf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/MinAggFunctionDeterminer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/DoubleSumKudaf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/LongSumKudaf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/SumAggFunctionDeterminer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/Kudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/datetime/StringToTimestamp.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/datetime/TimestampToString.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/json/JsonExtractStringKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/math/AbsKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/math/CeilKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/math/FloorKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/math/RandomKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/math/RoundKudf.java create mode 100644 
ksql-core/src/main/java/io/confluent/ksql/function/udf/string/ConcatKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/string/IfNullKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/string/LCaseKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/string/LenKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/string/SubstringKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/string/TrimKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/string/UCaseKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/function/udf/util/CastKudf.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/DataSource.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlStdOut.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlStream.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlTable.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlTopic.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/MetaStore.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/MetaStoreImpl.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/MetastoreUtil.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/metastore/StructuredDataSource.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/AstBuilder.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/CaseInsensitiveStream.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/CodegenExpressionFormatter.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/ExpressionFormatter.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParser.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParserErrorStrategy.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/ParsingException.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/SqlFormatter.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/AggregateExpressionRewriter.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/ExpressionFormatterQueryRewrite.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/SqlFormatterQueryRewrite.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/AbstractStreamCreateStatement.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/AbstractStreamDropStatement.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/AliasedRelation.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/AllColumns.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ArithmeticBinaryExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ArithmeticUnaryExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/AstVisitor.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/BetweenPredicate.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/BinaryLiteral.java create mode 100644 
ksql-core/src/main/java/io/confluent/ksql/parser/tree/BooleanLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Cast.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ComparisonExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateStream.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateStreamAsSelect.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateTable.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateTableAsSelect.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateView.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DecimalLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultAstVisitor.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultExpressionTraversalVisitor.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultTraversalVisitor.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Delete.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DereferenceExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DoubleLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropStream.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropTable.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropTopic.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropView.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/EmptyStatement.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Except.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExistsPredicate.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Explain.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainFormat.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainOption.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainType.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExportCatalog.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Expression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExpressionRewriter.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExpressionTreeRewriter.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Extract.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/FieldReference.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/FrameBound.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/FunctionCall.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/GenericLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupBy.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupingElement.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupingSets.java create mode 100644 
ksql-core/src/main/java/io/confluent/ksql/parser/tree/HoppingWindowExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/InListExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/InPredicate.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Intersect.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/IntervalLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/IsNotNullPredicate.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/IsNullPredicate.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Join.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinCriteria.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinOn.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinUsing.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/KsqlWindowExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/LambdaExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/LikePredicate.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListProperties.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListQueries.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListRegisteredTopics.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListStreams.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListTables.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListTopics.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Literal.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/LoadProperties.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/LogicalBinaryExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/LongLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/NaturalJoin.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Node.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/NodeLocation.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/NotExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/NullIfExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/NullLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/PrintTopic.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/QualifiedName.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/QualifiedNameReference.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Query.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/QueryBody.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/QuerySpecification.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/RegisterTopic.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Relation.java create mode 100644 
ksql-core/src/main/java/io/confluent/ksql/parser/tree/RenameColumn.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/RenameTable.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Row.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/RunScript.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SampledRelation.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SearchedCaseExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Select.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SelectItem.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SessionWindowExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetOperation.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetProperty.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetSession.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowCatalogs.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowColumns.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowCreate.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowFunctions.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowPartitions.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowSchemas.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowSession.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SimpleCaseExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SimpleGroupBy.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SingleColumn.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SortItem.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/StackableAstVisitor.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Statement.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Statements.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/StringLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SubqueryExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SubscriptExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/SymbolReference.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Table.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/TableElement.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/TableSubquery.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/TerminateQuery.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/TimeLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/TimestampLiteral.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/TumblingWindowExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Union.java create mode 100644 
ksql-core/src/main/java/io/confluent/ksql/parser/tree/UnsetProperty.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Values.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/WhenClause.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/Window.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowExpression.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowFrame.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowName.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/With.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/parser/tree/WithQuery.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/physical/GenericRow.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/physical/PhysicalPlanBuilder.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/DefaultTraversalVisitor.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/PlanException.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/FilterNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/KsqlBareOutputNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/KsqlStructuredDataOutputNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/OutputNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanNodeId.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanVisitor.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/ProjectNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/SourceNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/planner/plan/StructuredDataSourceNode.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/KsqlTopicSerDe.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlAvroTopicSerDe.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlGenericRowAvroDeserializer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlGenericRowAvroSerializer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedDeserializer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedSerializer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedTopicSerDe.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonSerializer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonTopicSerDe.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/structured/QueuedSchemaKStream.java create mode 100644 
ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKGroupedStream.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKStream.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKTable.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/structured/SqlPredicate.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/DataSourceExtractor.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/ExpressionMetadata.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/ExpressionTypeManager.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/ExpressionUtil.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/GenericRowValueTypeEnforcer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/KafkaTopicClient.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/KafkaTopicClientImpl.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/KsqlConfig.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/KsqlException.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/KsqlPreconditions.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/Pair.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/QueryMetadata.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/QueuedQueryMetadata.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/SchemaUtil.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/SerDeUtil.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/StringUtil.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/Version.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/WindowedSerde.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/json/JsonPathTokenizer.java create mode 100644 ksql-core/src/main/java/io/confluent/ksql/util/timestamp/KsqlTimestampExtractor.java create mode 100644 ksql-core/src/main/resources/checkstyle.xml create mode 100644 ksql-core/src/main/resources/ksql-version.properties create mode 100644 ksql-core/src/main/resources/log4j.properties create mode 100644 ksql-core/src/test/java/io/confluent/ksql/analyzer/AggregateAnalyzerTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/analyzer/AnalyzerTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/integtests/json/JsonFormatTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/metastore/MetastoreTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/metastore/MetastoreUtilTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/structured/SqlPredicateTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/testutils/EmbeddedSingleNodeKafkaCluster.java create mode 100644 
ksql-core/src/test/java/io/confluent/ksql/testutils/IntegrationTestUtils.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/testutils/KafkaEmbedded.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/testutils/ZooKeeperEmbedded.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/ExpressionTypeManagerTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/ExpressionUtilTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/FakeKafkaTopicClient.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/KsqlTestUtil.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/OrderDataProvider.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/SchemaUtilTest.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/TestDataProvider.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/TopicConsumer.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/TopicProducer.java create mode 100644 ksql-core/src/test/java/io/confluent/ksql/util/json/JsonPathTokenizerTest.java create mode 100644 ksql-core/src/test/resources/TestCatalog.json create mode 100644 ksql-core/src/test/resources/avro_order_schema.avro create mode 100644 ksql-examples/Dockerfile create mode 100644 ksql-examples/README.md create mode 100644 ksql-examples/examples/clickstream-analysis/README.md create mode 100644 ksql-examples/examples/clickstream-analysis/clickstream-analysis-dashboard.json create mode 100755 ksql-examples/examples/clickstream-analysis/clickstream-analysis-dashboard.sh create mode 100644 ksql-examples/examples/clickstream-analysis/clickstream-schema.sql create mode 100644 ksql-examples/examples/clickstream-analysis/connect-config/README.md create mode 100644 ksql-examples/examples/clickstream-analysis/connect-config/connect.properties create mode 100644 ksql-examples/examples/clickstream-analysis/connect-config/null-filter-4.0.0-SNAPSHOT.jar create mode 100755 ksql-examples/examples/clickstream-analysis/elastic-dynamic-template.sh create mode 100755 ksql-examples/examples/clickstream-analysis/ksql-connect-es-grafana.sh create mode 100755 ksql-examples/examples/clickstream-analysis/ksql-tables-to-grafana.sh create mode 100644 ksql-examples/examples/clickstream-analysis/malicious-users-dashboard.json create mode 100755 ksql-examples/examples/clickstream-analysis/malicious-users-dashboard.sh create mode 100755 ksql-examples/examples/clickstream-analysis/run-grafana.sh create mode 100644 ksql-examples/pom.xml create mode 100644 ksql-examples/src/assembly/development.xml create mode 100644 ksql-examples/src/assembly/package.xml create mode 100644 ksql-examples/src/assembly/standalone.xml create mode 100644 ksql-examples/src/main/java/io/confluent/avro/random/generator/Generator.java create mode 100644 ksql-examples/src/main/java/io/confluent/avro/random/generator/Main.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/datagen/AvroConsumer.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/datagen/AvroProducer.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGen.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGenProducer.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/datagen/DelimitedConsumer.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/datagen/DelimitedProducer.java create mode 100644 
ksql-examples/src/main/java/io/confluent/ksql/datagen/JsonConsumer.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/datagen/JsonProducer.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/datagen/SessionManager.java create mode 100644 ksql-examples/src/main/java/io/confluent/ksql/embedded/EmbeddedKsql.java create mode 100644 ksql-examples/src/main/resources/SampleQueries.sql create mode 100644 ksql-examples/src/main/resources/clickstream_codes_schema.avro create mode 100644 ksql-examples/src/main/resources/clickstream_schema.avro create mode 100644 ksql-examples/src/main/resources/clickstream_users_schema.avro create mode 100644 ksql-examples/src/main/resources/ksql-server.properties create mode 100644 ksql-examples/src/main/resources/orders_schema.avro create mode 100644 ksql-examples/src/main/resources/pageviews_schema.avro create mode 100644 ksql-examples/src/main/resources/users_schema.avro create mode 100644 ksql-examples/src/main/test/io/confluent/ksql/datagen/SessionManagerTest.java create mode 100644 ksql-rest-app/pom.xml create mode 100644 ksql-rest-app/src/assembly/development.xml create mode 100644 ksql-rest-app/src/assembly/package.xml create mode 100644 ksql-rest-app/src/assembly/standalone.xml create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/client/KsqlRestClient.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/client/RestResponse.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatus.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatusEntity.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatuses.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ErrorMessage.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ErrorMessageEntity.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ExecutionPlan.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KafkaTopicInfo.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KafkaTopicsList.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlEntity.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlEntityList.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlRequest.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlTopicInfo.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlTopicsList.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/Queries.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SchemaMapper.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ServerInfo.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/StreamedRow.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/StreamsList.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/TablesList.java create mode 100644 
ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/TopicDescription.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/CliOptions.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StatementParser.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/Command.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandId.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandIdAssigner.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandRunner.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandStore.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/StatementExecutor.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlExceptionMapper.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/ServerInfoResource.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/StatusResource.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/QueryRowWriter.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriter.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamWriter.java create mode 100644 ksql-rest-app/src/main/resources/io/confluent/ksql/rest/quickstart.html create mode 100644 ksql-rest-app/src/main/resources/log4j.properties create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/client/KsqlRestClientTest.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandRunnerTest.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/StatementExecutorTest.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockApplication.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockCommandStore.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKafkaTopicClient.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKsqkEngine.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKsqlResources.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockStatusResource.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockStreamedQueryResource.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java create mode 100644 
ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/StatusResourceTest.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/StreamedQueryResourceTest.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/utils/TestUtils.java create mode 100644 licenses/LICENSE-annotations-3.0.1.txt create mode 100644 licenses/LICENSE-antlr4-runtime-4.7.txt create mode 100644 licenses/LICENSE-avro-1.8.1.txt create mode 100644 licenses/LICENSE-avro-random-generator-0.1-20170531.181813-9.txt create mode 100644 licenses/LICENSE-commons-collections4-4.0.txt create mode 100644 licenses/LICENSE-commons-compress-1.8.1.txt create mode 100644 licenses/LICENSE-commons-csv-1.4.txt create mode 100644 licenses/LICENSE-commons-lang3-3.3.2.txt create mode 100644 licenses/LICENSE-generex-1.0.1.txt create mode 100644 licenses/LICENSE-jackson-annotations-2.8.0.txt create mode 100644 licenses/LICENSE-jackson-core-2.8.4.txt create mode 100644 licenses/LICENSE-jackson-core-2.8.5.txt create mode 100644 licenses/LICENSE-jackson-core-asl-1.9.13.txt create mode 100644 licenses/LICENSE-jackson-databind-2.8.4.txt create mode 100644 licenses/LICENSE-jackson-databind-2.8.5.txt create mode 100644 licenses/LICENSE-jackson-mapper-asl-1.9.13.txt create mode 100644 licenses/LICENSE-jline-3.3.1.txt create mode 100644 licenses/LICENSE-jol-core-0.2.txt create mode 100644 licenses/LICENSE-jopt-simple-5.0.4.txt create mode 100644 licenses/LICENSE-jsr305-3.0.2.txt create mode 100644 licenses/LICENSE-log4j-1.2.17.txt create mode 100644 licenses/LICENSE-netty-3.7.0.Final.txt create mode 100644 licenses/LICENSE-snappy-java-1.1.1.3.txt create mode 100644 licenses/LICENSE-snappy-java-1.1.4.txt create mode 100644 licenses/LICENSE-zookeeper-3.4.8.txt create mode 100644 licenses/licenses.html create mode 100644 notices/NOTICE-avro-1.8.1.txt create mode 100644 notices/NOTICE-jackson-core-2.8.5.txt create mode 100644 notices/NOTICE-jackson-core-asl-1.9.13.txt create mode 100644 notices/NOTICE-jackson-databind-2.8.5.txt create mode 100644 notices/NOTICE-jackson-mapper-asl-1.9.13.txt create mode 100644 notices/NOTICE-log4j-1.2.17.txt create mode 100644 pom.xml create mode 100644 quickstart/docker-compose.yml create mode 100644 quickstart/ksql-quickstart-schemas.jpg create mode 100644 quickstart/quickstart-docker.rst create mode 100644 quickstart/quickstart-non-docker.rst create mode 100644 quickstart/quickstart.rst diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000000..84ebc64e91c1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,23 @@ +# Build products +target/ +docs/_build/ +dist/ + +# IntelliJ data +*.iml +*.iws +*.ipr +.idea/ + +# Other +.DS_Store +.dumbjump +*classes +*~ +*# +.#* +.classpath +/.metadata +.project +.settings +.tern-port diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000000..ad410e113021 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 000000000000..21d5252cd0d5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,130 @@
+# KSQL - a Streaming SQL Engine for Apache Kafka™ from Confluent
+---
+
+A DEVELOPER PREVIEW
+
+---
+
+KSQL is an open source streaming SQL engine that implements continuous, interactive queries against Apache Kafka™. It allows you to query, read, write, and process data in Apache Kafka in real time and at scale, using SQL commands.
+
+KSQL does not require proficiency with a programming language such as Java or Go, and it does not require you to install and manage a separate processing cluster technology. As such, it opens up the world of stream processing to a broader set of users and applications than ever before.
+
+This release is a DEVELOPER PREVIEW, which is free and open source from Confluent under the Apache 2.0 license.
+
+---
+
+# Hello, KSQL!
+---
+Here are some example queries to illustrate the look and feel of the KSQL syntax:
+
+Filter an inbound stream of page views to only show errors:
+
+```sql
+SELECT STREAM request, ip, status
+  FROM pageviews
+  WHERE status >= 400
+```
+
+Create a new stream that contains the pageviews from female users only:
+```sql
+CREATE STREAM pageviews_by_female_users AS
+  SELECT users.userid AS userid, pageid, regionid, gender FROM pageviews
+  LEFT JOIN users ON pageviews.userid = users.userid
+  WHERE gender = 'FEMALE';
+```
+
+Continuously compute the number of pageviews for each page with 5-second tumbling windows:
+```sql
+CREATE TABLE pageview_counts AS
+  SELECT pageid, count(*) FROM pageviews
+  WINDOW TUMBLING (size 5 second)
+  GROUP BY pageid;
+```
+
+# Let’s Play with KSQL
+---
+
+* First-time users may want to try our [interactive quickstart](https://github.com/confluentinc/ksql).
+* If you want a more realistic end-to-end example, walk through our [KSQL demo](https://github.com/confluentinc/ksql).
+
+To learn more about KSQL, see our [documentation](https://github.com/confluentinc/ksql), including the [KSQL Syntax Guide](https://github.com/confluentinc/ksql).
+
+# Need help?
+---
+If you need help or have questions, you have several options:
+* Ask a question in the #ksql channel in our public [Confluent Community Slack](https://confluent.typeform.com/to/GxTHUD). Account registration is free and self-service.
+* Create a [ticket](https://github.com/confluentinc/ksql) in our issue tracker.
+* Join the [Confluent google group](https://groups.google.com/forum/#!forum/confluent-platform).
+
+# How it works
+---
+KSQL consists of a client and a server component. The client is a command line interface (CLI) similar to the CLIs of MySQL or PostgreSQL. The server, of which you can run one or many instances, executes your queries.
+
+You can use KSQL in stand-alone mode or in client-server mode.
+
+In stand-alone mode, both the KSQL client and server components are co-located on the same machine, in the same JVM, and are started together, which makes this mode convenient for local development and testing.
+
+![alt text](https://user-images.githubusercontent.com/2977624/29090610-f4b11096-7c34-11e7-8a63-85c9ead22bc3.png)
+
+In client-server mode, a pool of KSQL servers runs on remote machines, VMs, or containers, and the CLI connects to them over HTTP, as sketched below.
+
+![alt text](https://user-images.githubusercontent.com/2977624/29090617-fab5e930-7c34-11e7-9eee-0554192854d5.png)
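+
+As a quick sketch of the two modes (hypothetical invocations, inferred from the `local` and `remote` commands registered in `Ksql.java` later in this patch and from `config/ksqlserver.properties`; exact arguments and defaults may differ):
+
+```sh
+# Stand-alone mode: client and server co-located in one JVM
+./bin/ksql-cli local
+
+# Client-server mode: start a server, then point the CLI at it over HTTP
+# (config/ksqlserver.properties defaults the listener to http://localhost:8080)
+./bin/ksql-server-start
+./bin/ksql-cli remote http://localhost:8080
+```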
+
+# Frequently Asked Questions
+---
+**What are the benefits of KSQL?**
+
+KSQL allows you to query, read, write, and process data in Apache Kafka in real time and at scale, using intuitive SQL-like syntax. KSQL does not require proficiency with a programming language such as Java or Scala, and you don’t have to install a separate processing cluster technology.
+
+**What are the technical requirements of KSQL?**
+
+KSQL only requires:
+1. a Java runtime environment
+2. access to an Apache Kafka cluster for reading and writing data in real time. The cluster can be on-premises or in the cloud.
+
+We recommend the use of [Confluent Platform](https://www.confluent.io/product/confluent-platform/) or [Confluent Cloud](https://www.confluent.io/confluent-cloud/) for running Apache Kafka.
+
+**Is KSQL owned by the Apache Software Foundation?**
+
+No, KSQL is owned and maintained by [Confluent Inc.](https://www.confluent.io/) as part of its free [Confluent Open Source](https://www.confluent.io/product/confluent-open-source/) product.
+
+**How does KSQL compare to Apache Kafka’s Streams API?**
+
+KSQL is complementary to the Kafka Streams API, and indeed executes queries through Streams applications. One of the key benefits of KSQL is that it does not require the user to develop any code in Java or Scala.
+This enables users to use a SQL-like interface alone to construct streaming ETL pipelines, as well as to respond to real-time, continuous business requests. For full-fledged stream processing applications, Kafka Streams remains a more appropriate choice.
+As with many technologies, each has its sweet spot based on technical requirements, mission-criticality, and user skillset.
+
+**Is KSQL ready for production?**
+
+KSQL is a technical preview at this point in time. We do not yet recommend its use for production purposes.
+
+**Can I use KSQL with my favorite data format (e.g. JSON, Avro)?**
+
+KSQL currently supports the following formats:
+
+* DELIMITED (e.g. CSV)
+* JSON
+
+_Support for Avro is expected soon._
+
+**Is KSQL fully compliant with ANSI SQL?**
+
+KSQL is a dialect inspired by ANSI SQL. It has some differences because it is geared toward processing streaming data. For example, ANSI SQL has no notion of “windowing” for use cases such as performing aggregations on data grouped into 5-minute windows, which is commonly required functionality in the streaming world.
+
+# Contributing to KSQL
+---
+*This section contains information about how to contribute code and documentation, etc.*
+
+To build KSQL locally:
+
+```sh
+$ git clone https://github.com/confluentinc/ksql.git
+$ cd ksql
+$ mvn clean package
+```
+
+# License
+---
+The project is licensed under the Apache License, version 2.0.
+
+*Apache, Apache Kafka, Kafka, and associated open source project names are trademarks of the [Apache Software Foundation](https://www.apache.org/)*
+
diff --git a/bin/ksql-cli b/bin/ksql-cli
new file mode 100755
index 000000000000..b5fe93626ea4
--- /dev/null
+++ b/bin/ksql-cli
@@ -0,0 +1,17 @@
+#!/bin/bash
+# (Copyright) [2017 - 2017] Confluent, Inc.
+
+#
+# Use shellcheck to lint this file
+#
+set -ue
+
+base_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )/.."
&& pwd ) +: "${KSQL_CONFIG_DIR:="$base_dir/config"}" + +: "${KSQL_LOG4J_OPTS:=""}" +if [ -z "$KSQL_LOG4J_OPTS" ] && [ -e "$KSQL_CONFIG_DIR/log4j-silent.properties" ]; then + export KSQL_LOG4J_OPTS="-Dlog4j.configuration=file:$KSQL_CONFIG_DIR/log4j-silent.properties" +fi + +exec "$base_dir"/bin/ksql-run-class io.confluent.ksql.Ksql "$@" diff --git a/bin/ksql-cli-stop b/bin/ksql-cli-stop new file mode 100755 index 000000000000..ca0642d96d19 --- /dev/null +++ b/bin/ksql-cli-stop @@ -0,0 +1,23 @@ +#!/bin/bash +# (Copyright) [2017 - 2017] Confluent, Inc. + +# +# Use shellcheck to lint this file +# + +# This script is mainly useful if the user has run some non-interactive query via the CLI with the +# -daemon flag passed in; it's less painful than manually grepping for the process and killing it +# However, it comes with the downside that it kills every currently-running CLI session, interactive +# or not, daemon or not, so: +# TODO: Consider only killing non-interactive and/or daemon instances of the CLI +CLIPIDS=$(jcmd | grep 'io\.confluent\.ksql\.Ksql' | awk '{print $1}') + +if [ -z "$CLIPIDS" ]; then + echo "No CLI session(s) to stop" + exit 1 +fi + +for PID in $CLIPIDS; do + kill -s TERM "$PID" + while kill -0 "$PID" >/dev/null 2>&1; do sleep 1; done +done diff --git a/bin/ksql-datagen b/bin/ksql-datagen new file mode 100755 index 000000000000..22da804283e3 --- /dev/null +++ b/bin/ksql-datagen @@ -0,0 +1,17 @@ +#!/bin/bash +# (Copyright) [2017 - 2017] Confluent, Inc. + +# +# Use shellcheck to lint this file +# +set -ue + +base_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd ) +: "${KSQL_CONFIG_DIR:="$base_dir/config"}" + +: "${KSQL_LOG4J_OPTS:=""}" +if [ -z "$KSQL_LOG4J_OPTS" ] && [ -e "$KSQL_CONFIG_DIR/log4j-silent.properties" ]; then + export KSQL_LOG4J_OPTS="-Dlog4j.configuration=file:$KSQL_CONFIG_DIR/log4j-silent.properties" +fi + +exec "$base_dir"/bin/ksql-run-class io.confluent.ksql.datagen.DataGen "$@" diff --git a/bin/ksql-node b/bin/ksql-node new file mode 100755 index 000000000000..d0692eddd32f --- /dev/null +++ b/bin/ksql-node @@ -0,0 +1,17 @@ +#!/bin/bash +# (Copyright) [2017 - 2017] Confluent, Inc. + +# +# Use shellcheck to lint this file +# +set -ue + +base_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd ) +: "${KSQL_CONFIG_DIR:="$base_dir/config"}" + +: "${KSQL_LOG4J_OPTS:=""}" +if [ -z "$KSQL_LOG4J_OPTS" ] && [ -e "$KSQL_CONFIG_DIR/log4j-silent.properties" ]; then + export KSQL_LOG4J_OPTS="-Dlog4j.configuration=file:$KSQL_CONFIG_DIR/log4j-silent.properties" +fi + +exec "$base_dir"/bin/ksql-run-class io.confluent.ksql.Ksql standalone "$@" diff --git a/bin/ksql-run-class b/bin/ksql-run-class new file mode 100755 index 000000000000..7a3b0b5b302d --- /dev/null +++ b/bin/ksql-run-class @@ -0,0 +1,144 @@ +#!/bin/bash +# (Copyright) [2017 - 2017] Confluent, Inc. + +# Use shellcheck to lint this file + +set -ue + +#cd -P deals with symlink from /bin to /usr/bin +base_dir=$( cd -P "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd ) + +# Log directory to use +if [ -z "${LOG_DIR:-}" ]; then + LOG_DIR="/tmp/ksql-logs" +fi + +# create logs directory +if [ ! -d "$LOG_DIR" ]; then + mkdir -p "$LOG_DIR" +fi + + +: "${KSQL_CLASSPATH:=""}" +: "${KSQL_LOG4J_OPTS:=""}" +: "${KSQL_JMX_OPTS:=""}" +: "${KSQL_OPTS:=""}" +: "${KSQL_HEAP_OPTS:=""}" +: "${KSQL_JVM_PERFORMANCE_OPTS:=""}" +: "${JMX_PORT:=""}" +: "${JAVA_HOME:=""}" + +# Development jars. 
`mvn package` should collect all the required dependency jars here +for project in ksql-core ksql-examples ksql-rest-app ksql-cli; do + for dir in "$base_dir/$project/target/$project"-*-development; do + KSQL_DIR="$dir/share/java/$project" + if [ -d "$KSQL_DIR" ]; then + KSQL_CLASSPATH="$KSQL_CLASSPATH:$KSQL_DIR/*" + fi + done +done + +# Production jars - each one is prepended so they will appear in reverse order. KSQL jars take precedence over other stuff passed in via CLASSPATH env var +for library in "confluent-common" "rest-utils" "ksql-core" "ksql-rest-app" "ksql-cli"; do + DIR="$base_dir/share/java/$library" + if [ -d "$DIR" ]; then + KSQL_CLASSPATH="$DIR/*:$KSQL_CLASSPATH" + fi +done + +# logj4 settings +if [ -z "$KSQL_LOG4J_OPTS" ]; then + # Test for files from dev -> packages so this will work as expected in dev if you have packages + # installed + if [ -e "$base_dir/config/log4j.properties" ]; then # Dev environment + KSQL_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/config/log4j.properties" + elif [ -e "$base_dir/etc/ksql/log4j.properties" ]; then # Simple zip file layout + KSQL_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/etc/ksql/log4j.properties" + elif [ -e "/etc/ksql/log4j.properties" ]; then # Normal install layout + KSQL_LOG4J_OPTS="-Dlog4j.configuration=file:/etc/ksql/log4j.properties" + fi +fi + +# JMX settings +if [ -z "$KSQL_JMX_OPTS" ]; then + KSQL_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " +fi + +# JMX port to use +if [ ! -z "$JMX_PORT" ]; then + KSQL_JMX_OPTS="$KSQL_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT " +fi + +# Generic jvm settings you want to add +if [ -z "$KSQL_OPTS" ]; then + KSQL_OPTS="" +fi + +# Which java to use +if [ -z "$JAVA_HOME" ]; then + JAVA="java" +else + JAVA="$JAVA_HOME/bin/java" +fi + +# Memory options +if [ -z "$KSQL_HEAP_OPTS" ]; then + KSQL_HEAP_OPTS="-Xmx3g" +fi + +# JVM performance options +if [ -z "$KSQL_JVM_PERFORMANCE_OPTS" ]; then + KSQL_JVM_PERFORMANCE_OPTS="-server -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true" +fi + +usage() { + echo "USAGE: $0 [-daemon] [opts] [-help]" + exit 1 +} + +if [ $# -lt 1 ]; +then + usage +fi + +MAIN="$1" +shift + +DAEMON_MODE="" +HELP="" + +while [ $# -gt 0 ]; do + COMMAND="$1" + case "$COMMAND" in + -help) + HELP="true" + shift + ;; + -daemon) + DAEMON_MODE="true" + shift + ;; + *) + break + ;; + esac +done + +if [ "x$HELP" = "xtrue" ]; then + usage +fi + +OPTIONS=($KSQL_HEAP_OPTS) +OPTIONS+=($KSQL_JVM_PERFORMANCE_OPTS) +OPTIONS+=($KSQL_JMX_OPTS) +OPTIONS+=($KSQL_LOG4J_OPTS) +OPTIONS+=($KSQL_OPTS) + +# Launch mode +if [ "x$DAEMON_MODE" = "xtrue" ]; then + DAEMON_STDOUT_FILE="$LOG_DIR/ksql.out" + echo "Writing console output to $DAEMON_STDOUT_FILE" + nohup "$JAVA" -cp "$KSQL_CLASSPATH" "${OPTIONS[@]}" "$MAIN" "$@" 2>&1 < /dev/null > "$DAEMON_STDOUT_FILE" & +else + exec "$JAVA" -cp "$KSQL_CLASSPATH" "${OPTIONS[@]}" "$MAIN" "$@" +fi diff --git a/bin/ksql-server-start b/bin/ksql-server-start new file mode 100755 index 000000000000..218efc71260f --- /dev/null +++ b/bin/ksql-server-start @@ -0,0 +1,20 @@ +#!/bin/bash +# (Copyright) [2017 - 2017] Confluent, Inc. + +# +# Use shellcheck to lint this file +# +set -ue + +base_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." 
&& pwd )
+: "${KSQL_CONFIG_DIR:="$base_dir/config"}"
+
+: "${KSQL_LOG4J_OPTS:=""}"
+if [ -z "$KSQL_LOG4J_OPTS" ] && [ -e "$KSQL_CONFIG_DIR/log4j-rolling.properties" ]; then
+  export KSQL_LOG4J_OPTS="-Dlog4j.configuration=file:$KSQL_CONFIG_DIR/log4j-rolling.properties"
+fi
+
+# TODO: Enable specification of properties via command-line arg in the KsqlRestApplication class
+# so that useful defaults for bootstrap server, port, etc. can be established here
+
+exec "$base_dir"/bin/ksql-run-class io.confluent.ksql.rest.server.KsqlRestApplication "$@"
diff --git a/bin/ksql-server-stop b/bin/ksql-server-stop
new file mode 100755
index 000000000000..267e013c78c0
--- /dev/null
+++ b/bin/ksql-server-stop
@@ -0,0 +1,17 @@
+#!/bin/bash
+# (Copyright) [2017 - 2017] Confluent, Inc.
+
+#
+# Use shellcheck to lint this file
+#
+SERVERPIDS=$(jcmd | grep 'io\.confluent\.ksql\.rest\.server\.KsqlRestApplication' | awk '{print $1}')
+
+if [ -z "$SERVERPIDS" ]; then
+  echo "No server(s) to stop"
+  exit 1
+fi
+
+for PID in $SERVERPIDS; do
+  kill -s TERM "$PID"
+  while kill -0 "$PID" >/dev/null 2>&1; do sleep 1; done
+done
diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml
new file mode 100644
index 000000000000..1d2463df3aaf
--- /dev/null
+++ b/checkstyle/suppressions.xml
@@ -0,0 +1,37 @@
+[37 lines of checkstyle suppressions XML; the markup did not survive extraction and is omitted here]
\ No newline at end of file
diff --git a/config/ksqlserver.properties b/config/ksqlserver.properties
new file mode 100644
index 000000000000..effbb5ee86f8
--- /dev/null
+++ b/config/ksqlserver.properties
@@ -0,0 +1,5 @@
+bootstrap.servers=localhost:9092
+application.id=ksql_server_quickstart
+ksql.command.topic.suffix=commands
+
+listeners=http://localhost:8080
diff --git a/config/log4j-rolling.properties b/config/log4j-rolling.properties
new file mode 100644
index 000000000000..900c42ad7b6e
--- /dev/null
+++ b/config/log4j-rolling.properties
@@ -0,0 +1,42 @@
+# this is a sample log4j config that will roll log files
+# lines with `File=` may need to be updated for your environment
+
+log4j.rootLogger=INFO, main
+
+# appenders
+log4j.appender.main=org.apache.log4j.RollingFileAppender
+log4j.appender.main.File=/tmp/ksql.log
+log4j.appender.main.layout=org.apache.log4j.PatternLayout
+log4j.appender.main.layout.ConversionPattern=[%d] %p %m (%c:%L)%n
+log4j.appender.main.MaxFileSize=10MB
+log4j.appender.main.MaxBackupIndex=5
+log4j.appender.main.append=true
+
+log4j.appender.streams=org.apache.log4j.RollingFileAppender
+log4j.appender.streams.File=/tmp/ksql-streams.log
+log4j.appender.streams.layout=org.apache.log4j.PatternLayout
+log4j.appender.streams.layout.ConversionPattern=[%d] %p %m (%c:%L)%n
+
+log4j.appender.kafka=org.apache.log4j.RollingFileAppender
+log4j.appender.kafka.File=/tmp/ksql-kafka.log
+log4j.appender.kafka.layout=org.apache.log4j.PatternLayout
+log4j.appender.kafka.layout.ConversionPattern=[%d] %p %m (%c:%L)%n
+log4j.appender.kafka.MaxFileSize=10MB
+log4j.appender.kafka.MaxBackupIndex=5
+log4j.appender.kafka.append=true
+
+# loggers
+log4j.logger.org.apache.kafka.streams=INFO, streams
+log4j.additivity.org.apache.kafka.streams=false
+
+log4j.logger.kafka=ERROR, kafka
+log4j.additivity.kafka=false
+
+log4j.logger.org.apache.zookeeper=ERROR, kafka
+log4j.additivity.org.apache.zookeeper=false
+
+log4j.logger.org.apache.kafka=ERROR, kafka
+log4j.additivity.org.apache.kafka=false
+
+log4j.logger.org.I0Itec.zkclient=ERROR, kafka
+log4j.additivity.org.I0Itec.zkclient=false
diff --git a/config/log4j-silent.properties
b/config/log4j-silent.properties new file mode 100644 index 000000000000..8520d48c32d1 --- /dev/null +++ b/config/log4j-silent.properties @@ -0,0 +1 @@ +log4j.rootLogger=OFF diff --git a/config/log4j.properties b/config/log4j.properties new file mode 100644 index 000000000000..1978cf31c9d1 --- /dev/null +++ b/config/log4j.properties @@ -0,0 +1,16 @@ +log4j.rootLogger=INFO, stdout + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n + +log4j.appender.streams=org.apache.log4j.ConsoleAppender +log4j.appender.streams.layout=org.apache.log4j.PatternLayout +log4j.appender.streams.layout.ConversionPattern=[%d] %p %m (%c:%L)%n + +log4j.logger.kafka=ERROR, stdout +log4j.logger.org.apache.kafka.streams=INFO, streams +log4j.additivity.org.apache.kafka.streams=false +log4j.logger.org.apache.zookeeper=ERROR, stdout +log4j.logger.org.apache.kafka=ERROR, stdout +log4j.logger.org.I0Itec.zkclient=ERROR, stdout diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000000..a3402468d208 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,14 @@ +# KSQL Demo Images + +This project uses the `dockerfile-maven` plugin to build Docker images via Maven. + +To build SNAPSHOT images, configure `.m2/settings.xml` for SNAPSHOT dependencies. These must be available at build time. + +Pushing images is currently handled via `docker push`, and is not part of the build. + +``` +mvn package -DskipTests # Build local images + +# Build images for a private registry; trailing '/' is required: +# mvn package -DskipTests -Ddocker.registry=docker.example.com:8080/ -Ddocker.tag=$VERSION-$BUILD_NUMBER +``` diff --git a/ksql-cli/Dockerfile b/ksql-cli/Dockerfile new file mode 100644 index 000000000000..cc64cb845de9 --- /dev/null +++ b/ksql-cli/Dockerfile @@ -0,0 +1,11 @@ +ARG DOCKER_REGISTRY + +FROM ${DOCKER_REGISTRY}confluentinc/cp-base + +ARG KSQL_VERSION +ARG ARTIFACT_ID + +ADD target/${ARTIFACT_ID}-${KSQL_VERSION}-standalone.jar /usr/share/java/${ARTIFACT_ID}/${ARTIFACT_ID}-${KSQL_VERSION}-standalone.jar +ADD target/${ARTIFACT_ID}-${KSQL_VERSION}-package/bin/* /usr/bin/ +ADD target/${ARTIFACT_ID}-${KSQL_VERSION}-package/etc/* /etc/ksql/ +ADD target/${ARTIFACT_ID}-${KSQL_VERSION}-package/share/doc/* /usr/share/doc/${ARTIFACT_ID}/ diff --git a/ksql-cli/pom.xml b/ksql-cli/pom.xml new file mode 100644 index 000000000000..0fa8ada72afb --- /dev/null +++ b/ksql-cli/pom.xml @@ -0,0 +1,192 @@ + + + 4.0.0 + + + io.confluent.ksql + ksql-parent + 0.1-SNAPSHOT + + + ksql-cli + + + ${project.parent.basedir} + io.confluent.ksql.Ksql + false + ${main-class} + + + + + io.confluent.ksql + ksql-core + + + + io.confluent.ksql + ksql-rest-app + + + + com.github.rvesse + airline + + + + javax.inject + javax.inject + + + + junit + junit + + + + net.java.dev.jna + jna + + + + org.easymock + easymock + + + + org.jline + jline + + + + + io.confluent.ksql + ksql-core + ${project.version} + test-jar + test + + + + + org.apache.curator + curator-test + 2.9.0 + test + + + + org.apache.kafka + kafka_${kafka.scala.version} + test + test + + + + org.apache.kafka + kafka-clients + test + test + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + src/assembly/development.xml + src/assembly/package.xml + src/assembly/standalone.xml + + + + ${main-class} + + + false + + + + make-assembly + package + + single + + + + + + + org.codehaus.mojo + exec-maven-plugin + ${exec-maven-plugin.version} + + + 
create-licenses + + io.confluent.licenses.LicenseFinder + + + -i ${project.build.directory}/${project.build.finalName}-package/share/java/${artifactId} + -f + -h ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/licenses.html + -l ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/licenses + -n ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/notices + -x licenses-${project.version}.jar + + + package + + java + + + + + true + true + + io.confluent + licenses + + + + + io.confluent + licenses + ${licenses.version} + + + + + + com.spotify + dockerfile-maven-plugin + ${dockerfile-maven-plugin.version} + + + default + + build + + + + ${project.artifactId} + ${project.version} + ${docker.registry} + + ${docker.tag} + ${docker.registry}confluentinc/${project.artifactId} + + + + + + + + diff --git a/ksql-cli/src/assembly/development.xml b/ksql-cli/src/assembly/development.xml new file mode 100644 index 000000000000..3cb720d9b06e --- /dev/null +++ b/ksql-cli/src/assembly/development.xml @@ -0,0 +1,51 @@ + + + development + + dir + + false + + + ${project.parent.basedir} + share/doc/ksql-cli/ + + README* + COPYRIGHT* + + + + ${project.parent.basedir} + + + bin/* + + + + ${project.parent.basedir}/config + etc/ksql-cli + + * + + + + + + share/java/ksql-cli + true + + true + + org.slf4j:slf4j-log4j12 + + + + diff --git a/ksql-cli/src/assembly/package.xml b/ksql-cli/src/assembly/package.xml new file mode 100644 index 000000000000..721cfdae363a --- /dev/null +++ b/ksql-cli/src/assembly/package.xml @@ -0,0 +1,55 @@ + + + package + + dir + + false + + + ${project.parent.basedir} + share/doc/ksql-cli/ + + version.txt + COPYRIGHT* + + + + ${project.parent.basedir} + + + bin/* + + + + ${project.parent.basedir}/config + etc/ksql-cli + + * + + + + + + share/java/ksql-cli + true + + true + + io.confluent.ksql:ksql-rest-app + io.confluent:rest-utils + io.confluent:common-* + com.google.guava:guava + + + + diff --git a/ksql-cli/src/assembly/standalone.xml b/ksql-cli/src/assembly/standalone.xml new file mode 100644 index 000000000000..7297ef3b3a8b --- /dev/null +++ b/ksql-cli/src/assembly/standalone.xml @@ -0,0 +1,45 @@ + + + standalone + + jar + + false + + + ${project.parent.basedir} + / + + README* + COPYRIGHT* + + + + + + / + true + true + runtime + + ${project.groupId}:${project.artifactId} + + + + + / + false + true + runtime + + + log4j.properties + + + + + diff --git a/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java b/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java new file mode 100644 index 000000000000..0c2d2db7603b --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java @@ -0,0 +1,46 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql; + +import com.github.rvesse.airline.help.Help; +import com.github.rvesse.airline.parser.errors.ParseException; +import io.confluent.ksql.cli.commands.Local; +import io.confluent.ksql.cli.commands.Remote; +import io.confluent.ksql.cli.commands.Standalone; + +import java.io.IOException; + +public class Ksql { + + public static void main(String[] args) throws IOException { + Runnable runnable = null; + com.github.rvesse.airline.Cli cli = + com.github.rvesse.airline.Cli.builder("Cli") + .withDescription("Kafka Query Language") + .withDefaultCommand(Help.class) + .withCommand(Local.class) + .withCommand(Remote.class) + .withCommand(Standalone.class) + .build(); + + try { + runnable = cli.parse(args); + runnable.run(); + } catch (ParseException exception) { + if (exception.getMessage() != null) { + System.err.println(exception.getMessage()); + } else { + System.err.println("Options parsing failed for an unknown reason"); + } + System.err.println("See the help command for usage information"); + } catch (Exception e) { + System.err.println(e.getMessage()); + } + if ((runnable != null) && !(runnable instanceof Standalone)) { + System.exit(0); + } + + } +} diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/Cli.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/Cli.java new file mode 100644 index 000000000000..27a1c9b07c73 --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/Cli.java @@ -0,0 +1,551 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.cli; + +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.cli.console.CliSpecificCommand; +import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.entity.CommandStatusEntity; +import io.confluent.ksql.rest.entity.ErrorMessageEntity; +import io.confluent.ksql.rest.entity.KsqlEntity; +import io.confluent.ksql.util.CliUtils; +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.parser.AstBuilder; +import io.confluent.ksql.parser.KsqlParser; +import io.confluent.ksql.parser.SqlBaseParser; +import io.confluent.ksql.rest.client.KsqlRestClient; +import io.confluent.ksql.rest.client.RestResponse; +import io.confluent.ksql.rest.entity.KsqlEntityList; +import io.confluent.ksql.rest.entity.PropertiesList; +import io.confluent.ksql.cli.console.Console; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.Version; +import org.apache.commons.lang3.exception.ExceptionUtils; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.streams.StreamsConfig; +import org.jline.reader.EndOfFileException; +import org.jline.reader.UserInterruptException; +import org.jline.terminal.Terminal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Scanner; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import 
java.util.concurrent.TimeoutException; + +public class Cli implements Closeable, AutoCloseable { + + private static final Logger LOGGER = LoggerFactory.getLogger(Cli.class); + + private static final ConfigDef CONSUMER_CONFIG_DEF = getConfigDef(ConsumerConfig.class); + private static final ConfigDef PRODUCER_CONFIG_DEF = getConfigDef(ProducerConfig.class); + + private final ExecutorService queryStreamExecutorService; + + private final Long streamedQueryRowLimit; + private final Long streamedQueryTimeoutMs; + + final KsqlRestClient restClient; + final Console terminal; + + public Cli( + Long streamedQueryRowLimit, + Long streamedQueryTimeoutMs, + KsqlRestClient restClient, + Console terminal + ) throws IOException { + Objects.requireNonNull(restClient, "Must provide the CLI with a REST client"); + Objects.requireNonNull(terminal, "Must provide the CLI with a terminal"); + + this.streamedQueryRowLimit = streamedQueryRowLimit; + this.streamedQueryTimeoutMs = streamedQueryTimeoutMs; + this.restClient = restClient; + this.terminal = terminal; + + this.queryStreamExecutorService = Executors.newSingleThreadExecutor(); + } + + public void runInteractively() { + displayWelcomeMessage(); + boolean eof = false; + while (!eof) { + try { + handleLine(readLine()); + } catch (EndOfFileException exception) { + // EOF is fine, just terminate the REPL + terminal.writer().println("Exiting KSQL."); + eof = true; + } catch (Exception exception) { + LOGGER.error(ExceptionUtils.getStackTrace(exception)); + if (exception.getMessage() != null) { + terminal.writer().println(exception.getMessage()); + } else { + terminal.writer().println(exception.getClass().getName()); + // TODO: Maybe ask the user if they'd like to see the stack trace here? + } + } + terminal.flush(); + } + } + + private void displayWelcomeMessage() { + String serverVersion; + try { + serverVersion = restClient.makeRootRequest().getResponse().getVersion(); + } catch (Exception exception) { + serverVersion = ""; + } + String cliVersion = Version.getVersion(); + + /* + Should look like: + ================================= + = _ __ _____ ____ _ = + = | |/ // ____|/ __ \| | = + = | ' /| (___ | | | | | = + = | < \___ \| | | | | = + = | . \ ____) | |__| | |____ = + = |_|\_\_____/ \___\_\______| = + = = + == Kafka Streams Query Language = + Copyright 2017 Confluent Inc. + CLI v1.0.0, Server v1.0.0 located at http://localhost:9098 + + Text generated via http://www.network-science.de/ascii/, with the "big" font + */ + int logoWidth = 33; + String copyrightMessage = "Copyright 2017 Confluent Inc."; + String helpReminderMessage = "Having trouble? 
" + + "Type 'help' (case-insensitive) for a rundown of how things work!"; + // Don't want to display the logo if it'll just end up getting wrapped and looking hideous + if (terminal.getWidth() >= logoWidth) { + // Want to center the logo, but in the case of something like a fullscreen terminal, just + // centering around the help message (longest single line of text in the welcome message) + // should be enough; looks a little weird if you try to center the logo on a wide enough + // screen and it just kind of ends up out in the middle of nowhere; hence, the call to + // Math.min(terminal.getWidth(), helpReminderMessage.length()) + int paddedLogoWidth = Math.min(terminal.getWidth(), helpReminderMessage.length()); + int paddingWidth = (paddedLogoWidth - logoWidth) / 2; + String leftPadding = new String(new byte[paddingWidth]).replaceAll(".", " "); + terminal.writer().printf("%s======================================%n", leftPadding); + terminal.writer().printf("%s= _ __ _____ ____ _ =%n", leftPadding); + terminal.writer().printf("%s= | |/ // ____|/ __ \\| | =%n", leftPadding); + terminal.writer().printf("%s= | ' /| (___ | | | | | =%n", leftPadding); + terminal.writer().printf("%s= | < \\___ \\| | | | | =%n", leftPadding); + terminal.writer().printf("%s= | . \\ ____) | |__| | |____ =%n", leftPadding); + terminal.writer().printf("%s= |_|\\_\\_____/ \\___\\_\\______| =%n", leftPadding); + terminal.writer().printf("%s= =%n", leftPadding); + terminal.writer().printf("%s= Streaming Query Language for Kafka =%n", leftPadding); + terminal.writer().printf("%s %s%n", copyrightMessage, leftPadding); + } else { + terminal.writer().printf("KSQL, %s%n", copyrightMessage); + } + terminal.writer().println(); + terminal.writer().printf( + "CLI v%s, Server v%s located at %s%n", + cliVersion, + serverVersion, + restClient.getServerAddress() + ); + terminal.writer().println(); + terminal.writer().println(helpReminderMessage); + terminal.writer().println(); + terminal.flush(); + + } + + public void runNonInteractively(String input) throws Exception { + // Allow exceptions to halt execution of the Ksql script as soon as the first one is encountered + for (String logicalLine : getLogicalLines(input)) { + try { + handleLine(logicalLine); + } catch (EndOfFileException exception) { + // Swallow these silently; they're thrown by the exit command to terminate the REPL + return; + } + } + } + + private List getLogicalLines(String input) { + // TODO: Convert the input string into an InputStream, then feed it to the terminal via + // TerminalBuilder.streams(InputStream, OutputStream) + List result = new ArrayList<>(); + StringBuilder logicalLine = new StringBuilder(); + for (String physicalLine : input.split("\n")) { + if (!physicalLine.trim().isEmpty()) { + if (physicalLine.endsWith("\\")) { + logicalLine.append(physicalLine.substring(0, physicalLine.length() - 1)); + } else { + result.add(logicalLine.append(physicalLine).toString().trim()); + logicalLine = new StringBuilder(); + } + } + } + return result; + } + + @Override + public void close() throws IOException { + queryStreamExecutorService.shutdownNow(); + restClient.close(); + terminal.close(); + } + + public void handleLine(String line) throws Exception { + String trimmedLine = Optional.ofNullable(line).orElse("").trim(); + + if (trimmedLine.isEmpty()) { + return; + } + + String[] commandArgs = trimmedLine.split("\\s+", 2); + CliSpecificCommand cliSpecificCommand = terminal.getCliSpecificCommands().get(commandArgs[0].toLowerCase()); + if (cliSpecificCommand != 
null) { + cliSpecificCommand.execute(commandArgs.length > 1 ? commandArgs[1] : ""); + } else { + handleStatements(line); + } + } + + /** + * Attempt to read a logical line of input from the user. Can span multiple physical lines, as + * long as all but the last end with '\\'. + * @return The parsed, logical line. + * @throws EndOfFileException If there is no more input available from the user. + * @throws IOException If any other I/O error occurs. + */ + private String readLine() throws IOException { + while (true) { + try { + String result = terminal.getLineReader().readLine(); + // A 'dumb' terminal (the kind used at runtime if a 'system' terminal isn't available) will + // return null on EOF and user interrupt, instead of throwing the more fine-grained + // exceptions. This null-check helps ensure that, upon encountering EOF, even a 'dumb' + // terminal will be able to exit intelligently. + if (result == null) { + throw new EndOfFileException(); + } else { + return result.trim(); + } + } catch (UserInterruptException exception) { + // User hit ctrl-C, just clear the current line and try again. + terminal.writer().println("^C"); + terminal.flush(); + } + } + } + + private void handleStatements(String line) + throws IOException, InterruptedException, ExecutionException { + StringBuilder consecutiveStatements = new StringBuilder(); + for (SqlBaseParser.SingleStatementContext statementContext : + new KsqlParser().getStatements(line) + ) { + String statementText = KsqlEngine.getStatementString(statementContext); + if (statementContext.statement() instanceof SqlBaseParser.QuerystatementContext + || statementContext.statement() instanceof SqlBaseParser.PrintTopicContext) { + if (consecutiveStatements.length() != 0) { + printKsqlResponse( + restClient.makeKsqlRequest(consecutiveStatements.toString()) + ); + consecutiveStatements = new StringBuilder(); + } + if (statementContext.statement() instanceof SqlBaseParser.QuerystatementContext) { + handleStreamedQuery(statementText); + } else { + handlePrintedTopic(statementText); + } + } else if (statementContext.statement() instanceof SqlBaseParser.ListPropertiesContext) { + + KsqlEntityList ksqlEntityList = restClient.makeKsqlRequest(statementText).getResponse(); + PropertiesList propertiesList = (PropertiesList) ksqlEntityList.get(0); + propertiesList.getProperties().putAll(restClient.getLocalProperties()); + terminal.printKsqlEntityList( + Arrays.asList(propertiesList) + ); + } else if (statementContext.statement() instanceof SqlBaseParser.SetPropertyContext) { + SqlBaseParser.SetPropertyContext setPropertyContext = + (SqlBaseParser.SetPropertyContext) statementContext.statement(); + String property = AstBuilder.unquote(setPropertyContext.STRING(0).getText(), "'"); + String value = AstBuilder.unquote(setPropertyContext.STRING(1).getText(), "'"); + setProperty(property, value); + } else if (statementContext.statement() instanceof SqlBaseParser.UnsetPropertyContext) { + if (consecutiveStatements.length() != 0) { + printKsqlResponse( + restClient.makeKsqlRequest(consecutiveStatements.toString()) + ); + consecutiveStatements = new StringBuilder(); + } + SqlBaseParser.UnsetPropertyContext unsetPropertyContext = + (SqlBaseParser.UnsetPropertyContext) statementContext.statement(); + String property = AstBuilder.unquote(unsetPropertyContext.STRING().getText(), "'"); + unsetProperty(property); + } else if (statementContext.statement() instanceof SqlBaseParser.RunScriptContext) { + SqlBaseParser.RunScriptContext runScriptContext = + 
(SqlBaseParser.RunScriptContext) statementContext.statement(); + String schemaFilePath = AstBuilder.unquote(runScriptContext.STRING().getText(), "'"); + String fileContent; + try { + fileContent = new String(Files.readAllBytes(Paths.get(schemaFilePath))); + } catch (IOException e) { + throw new KsqlException(" Could not read statements from file: " + schemaFilePath); + } + setProperty(DdlConfig.SCHEMA_FILE_CONTENT_PROPERTY, fileContent); + printKsqlResponse( + restClient.makeKsqlRequest(statementText) + ); + } else if (statementContext.statement() instanceof SqlBaseParser.RegisterTopicContext) { + CliUtils cliUtils = new CliUtils(); + Optional avroSchema = cliUtils.getAvroSchemaIfAvroTopic( + (SqlBaseParser.RegisterTopicContext) statementContext.statement()); + if (avroSchema.isPresent()) { + setProperty(DdlConfig.AVRO_SCHEMA, avroSchema.get()); + } + consecutiveStatements.append(statementText); + } else { + consecutiveStatements.append(statementText); + } + } + if (consecutiveStatements.length() != 0) { + printKsqlResponse( + restClient.makeKsqlRequest(consecutiveStatements.toString()) + ); + } + } + + private void printKsqlResponse(RestResponse response) throws IOException { + if (response.isSuccessful()) { + KsqlEntityList ksqlEntities = response.getResponse(); + boolean noErrorFromServer = true; + for (KsqlEntity entity : ksqlEntities) { + if (entity instanceof ErrorMessageEntity) { + terminal.printErrorMessage(((ErrorMessageEntity) entity).getErrorMessage()); + noErrorFromServer = false; + } else if (entity instanceof CommandStatusEntity && + (((CommandStatusEntity) entity).getCommandStatus().getStatus() == CommandStatus.Status.ERROR)) { + String fullMessage = ((CommandStatusEntity) entity).getCommandStatus().getMessage(); + terminal.printError(fullMessage.split("\n")[0], fullMessage); + noErrorFromServer = false; + } + } + if (noErrorFromServer) { + terminal.printKsqlEntityList(response.getResponse()); + } + } else { + terminal.printErrorMessage(response.getErrorMessage()); + } + } + + private void handleStreamedQuery(String query) + throws IOException, InterruptedException, ExecutionException { + RestResponse queryResponse = + restClient.makeQueryRequest(query); + + if (queryResponse.isSuccessful()) { + try (KsqlRestClient.QueryStream queryStream = queryResponse.getResponse()) { + Future queryStreamFuture = queryStreamExecutorService.submit(new Runnable() { + @Override + public void run() { + for (long rowsRead = 0; keepReading(rowsRead) && queryStream.hasNext(); rowsRead++) { + try { + terminal.printStreamedRow(queryStream.next()); + } catch (IOException exception) { + throw new RuntimeException(exception); + } + } + } + }); + + terminal.handle(Terminal.Signal.INT, new Terminal.SignalHandler() { + @Override + public void handle(Terminal.Signal signal) { + terminal.handle(Terminal.Signal.INT, Terminal.SignalHandler.SIG_IGN); + queryStreamFuture.cancel(true); + } + }); + + try { + if (streamedQueryTimeoutMs == null) { + queryStreamFuture.get(); + Thread.sleep(1000); // TODO: Make things work without this + } else { + try { + queryStreamFuture.get(streamedQueryTimeoutMs, TimeUnit.MILLISECONDS); + } catch (TimeoutException exception) { + queryStreamFuture.cancel(true); + } + } + } catch (CancellationException exception) { + // It's fine + } + } finally { + terminal.writer().println("Query terminated"); + terminal.flush(); + } + } else { + terminal.printErrorMessage(queryResponse.getErrorMessage()); + } + } + + private boolean keepReading(long rowsRead) { + return streamedQueryRowLimit 
+  private void handlePrintedTopic(String printTopic)
+      throws InterruptedException, ExecutionException, IOException {
+    RestResponse<InputStream> topicResponse =
+        restClient.makePrintTopicRequest(printTopic);
+
+    if (topicResponse.isSuccessful()) {
+      try (Scanner topicStreamScanner = new Scanner(topicResponse.getResponse())) {
+        Future<?> topicPrintFuture = queryStreamExecutorService.submit(new Runnable() {
+          @Override
+          public void run() {
+            while (topicStreamScanner.hasNextLine()) {
+              String line = topicStreamScanner.nextLine();
+              if (!line.isEmpty()) {
+                terminal.writer().println(line);
+                terminal.flush();
+              }
+            }
+          }
+        });
+
+        terminal.handle(Terminal.Signal.INT, new Terminal.SignalHandler() {
+          @Override
+          public void handle(Terminal.Signal signal) {
+            terminal.handle(Terminal.Signal.INT, Terminal.SignalHandler.SIG_IGN);
+            topicPrintFuture.cancel(true);
+          }
+        });
+
+        try {
+          topicPrintFuture.get();
+        } catch (CancellationException exception) {
+          terminal.writer().println("Topic printing ceased");
+          terminal.flush();
+        }
+        topicResponse.getResponse().close();
+      }
+    } else {
+      terminal.writer().println(topicResponse.getErrorMessage().getMessage());
+      terminal.flush();
+    }
+  }
+
+  private void setProperty(String property, String value) {
+    String parsedProperty;
+    ConfigDef.Type type;
+    if (StreamsConfig.configDef().configKeys().containsKey(property)) {
+      type = StreamsConfig.configDef().configKeys().get(property).type;
+      parsedProperty = property;
+    } else if (CONSUMER_CONFIG_DEF.configKeys().containsKey(property)) {
+      type = CONSUMER_CONFIG_DEF.configKeys().get(property).type;
+      parsedProperty = property;
+    } else if (PRODUCER_CONFIG_DEF.configKeys().containsKey(property)) {
+      type = PRODUCER_CONFIG_DEF.configKeys().get(property).type;
+      parsedProperty = property;
+    } else if (property.startsWith(StreamsConfig.CONSUMER_PREFIX)) {
+      parsedProperty = property.substring(StreamsConfig.CONSUMER_PREFIX.length());
+      ConfigDef.ConfigKey configKey =
+          CONSUMER_CONFIG_DEF.configKeys().get(parsedProperty);
+      if (configKey == null) {
+        throw new IllegalArgumentException(String.format(
+            "Invalid consumer property: '%s'",
+            parsedProperty
+        ));
+      }
+      type = configKey.type;
+    } else if (property.startsWith(StreamsConfig.PRODUCER_PREFIX)) {
+      parsedProperty = property.substring(StreamsConfig.PRODUCER_PREFIX.length());
+      ConfigDef.ConfigKey configKey =
+          PRODUCER_CONFIG_DEF.configKeys().get(parsedProperty);
+      if (configKey == null) {
+        throw new IllegalArgumentException(String.format(
+            "Invalid producer property: '%s'",
+            parsedProperty
+        ));
+      }
+      type = configKey.type;
+    } else if (property.equalsIgnoreCase(DdlConfig.AVRO_SCHEMA)) {
+      restClient.setProperty(property, value);
+      return;
+    } else if (property.equalsIgnoreCase(DdlConfig.SCHEMA_FILE_CONTENT_PROPERTY)) {
+      restClient.setProperty(property, value);
+      return;
+    } else {
+      throw new IllegalArgumentException(String.format(
+          "Not recognizable as streams, consumer, or producer property: '%s'",
+          property
+      ));
+    }
+
+    if (KsqlEngine.getImmutableProperties().contains(parsedProperty)) {
+      throw new IllegalArgumentException(String.format(
+          "Cannot override property '%s'",
+          property
+      ));
+    }
+
+    Object parsedValue = ConfigDef.parseType(parsedProperty, value, type);
+    Object priorValue = restClient.setProperty(property, parsedValue);
+
+    terminal.writer().printf(
+        "Successfully changed local property '%s' from '%s' to '%s'%n",
+        property,
+        priorValue,
+        parsedValue
+    );
+    terminal.flush();
+  }
+
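The prefix handling in setProperty above strips StreamsConfig.CONSUMER_PREFIX ("consumer.") or PRODUCER_PREFIX ("producer.") before validating the key against the matching ConfigDef. A minimal sketch of just that rule; the class name is invented, the constant is the real Kafka Streams one:

    import org.apache.kafka.streams.StreamsConfig;

    public final class PrefixResolutionSketch {
      public static void main(String[] args) {
        String property = StreamsConfig.CONSUMER_PREFIX + "max.poll.records";
        String parsedProperty = property.startsWith(StreamsConfig.CONSUMER_PREFIX)
            ? property.substring(StreamsConfig.CONSUMER_PREFIX.length())
            : property;
        System.out.println(parsedProperty); // prints "max.poll.records"
      }
    }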
+  private void unsetProperty(String property) {
+    if (restClient.unsetProperty(property)) {
+      Object value = restClient.getLocalProperties().get(property);
+      terminal.writer().printf(
+          "Successfully unset local property '%s' (value was '%s')%n",
+          property,
+          value
+      );
+    } else {
+      throw new IllegalArgumentException(String.format(
+          "Cannot unset local property '%s' which was never set in the first place",
+          property
+      ));
+    }
+  }
+
+  // ConsumerConfig and ProducerConfig do not expose their ConfigDef instances,
+  // so read the private static CONFIG field reflectively.
+  private static ConfigDef getConfigDef(Class<?> clazz) {
+    try {
+      java.lang.reflect.Field field = clazz.getDeclaredField("CONFIG");
+      field.setAccessible(true);
+      return (ConfigDef) field.get(null);
+    } catch (Exception exception) {
+      // TODO: handle this more gracefully; returning null here causes an NPE
+      // as soon as the missing config def is consulted.
+      return null;
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/LocalCli.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/LocalCli.java
new file mode 100644
index 000000000000..9903dc40c7de
--- /dev/null
+++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/LocalCli.java
@@ -0,0 +1,61 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.cli;
+
+import io.confluent.ksql.rest.client.KsqlRestClient;
+import io.confluent.ksql.rest.server.KsqlRestApplication;
+import io.confluent.ksql.cli.console.Console;
+
+import java.io.IOException;
+import java.util.concurrent.TimeoutException;
+
+public class LocalCli extends Cli {
+
+  private final KsqlRestApplication restServer;
+
+  public LocalCli(
+      Long streamedQueryRowLimit,
+      Long streamedQueryTimeoutMs,
+      KsqlRestClient restClient,
+      Console terminal,
+      KsqlRestApplication restServer
+  ) throws Exception {
+    super(
+        streamedQueryRowLimit,
+        streamedQueryTimeoutMs,
+        restClient,
+        terminal
+    );
+    this.restServer = restServer;
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      restServer.getKsqlEngine().terminateAllQueries();
+      restServer.stop();
+      restServer.join();
+    } catch (TimeoutException exception) {
+      /*
+        This is only thrown under the following circumstances:
+
+          1. A user makes a request for a streamed query.
+          2. The user terminates the request for the streamed query.
+          3. Before the thread(s) responsible for streaming the query have terminated,
+             restServer.stop() is called.
+
+        Even if the threads then manage to terminate within the graceful shutdown window for the
+        server, the TimeoutException is still thrown.
+
+        TODO: Prevent the TimeoutException from being thrown when this happens.
+       */
+    } catch (Exception exception) {
+      throw new RuntimeException(exception);
+    } finally {
+      super.close();
+    }
+  }
+
+}
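LocalCli is AutoCloseable through Cli, and AbstractCliCommands.run() (below) uses it in a try-with-resources block, so close() applies the ordering described in the comment above: terminate queries, stop and join the embedded server, then release the terminal via super.close(). A hedged usage sketch, assuming the collaborators are built as in Local.getCli() later in this patch:

    import io.confluent.ksql.cli.LocalCli;
    import io.confluent.ksql.cli.console.Console;
    import io.confluent.ksql.rest.client.KsqlRestClient;
    import io.confluent.ksql.rest.server.KsqlRestApplication;

    final class LocalCliLifecycleSketch {
      static void runOnce(KsqlRestClient restClient, Console terminal,
                          KsqlRestApplication restServer) throws Exception {
        try (LocalCli cli = new LocalCli(10_000L, 10_000L, restClient, terminal, restServer)) {
          cli.runInteractively(); // blocks until the user exits
        } // close(): queries terminated, server stopped and joined, terminal closed
      }
    }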
diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/RemoteCli.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/RemoteCli.java
new file mode 100644
index 000000000000..be94eea61210
--- /dev/null
+++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/RemoteCli.java
@@ -0,0 +1,68 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.cli;
+
+import io.confluent.ksql.rest.client.KsqlRestClient;
+import io.confluent.ksql.cli.console.CliSpecificCommand;
+import io.confluent.ksql.cli.console.Console;
+
+import javax.ws.rs.ProcessingException;
+import java.io.IOException;
+
+public class RemoteCli extends Cli {
+
+  public RemoteCli(
+      Long streamedQueryRowLimit,
+      Long streamedQueryTimeoutMs,
+      KsqlRestClient restClient,
+      Console terminal
+  ) throws IOException {
+    super(
+        streamedQueryRowLimit,
+        streamedQueryTimeoutMs,
+        restClient,
+        terminal
+    );
+
+    validateClient();
+
+    terminal.registerCliSpecificCommand(new CliSpecificCommand() {
+      @Override
+      public String getName() {
+        return "server";
+      }
+
+      @Override
+      public void printHelp() {
+        terminal.writer().println("\tserver: Show the current server");
+        terminal.writer().println("\tserver <server>: Change the current server to <server>");
+        terminal.writer().println("\t  example: "
+            + "\"server http://my.awesome.server.com:9098\""
+        );
+      }
+
+      @Override
+      public void execute(String commandStrippedLine) throws IOException {
+        if (commandStrippedLine.isEmpty()) {
+          terminal.writer().println(restClient.getServerAddress());
+        } else {
+          String serverAddress = commandStrippedLine.trim();
+          restClient.setServerAddress(serverAddress);
+          validateClient();
+        }
+      }
+    });
+  }
+
+  private void validateClient() {
+    try {
+      restClient.makeRootRequest();
+    } catch (IllegalArgumentException exception) {
+      terminal.writer().println("Server URL must begin with protocol (e.g., http:// or https://)");
+    } catch (ProcessingException exception) {
+      terminal.writer().println("Warning: remote server address may not be valid");
+    }
+  }
+}
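Alongside the `server` command above, other CLI-specific commands follow the same three-method contract. A hypothetical example (the `ping` command does not exist in this patch; makeRootRequest is the same probe validateClient uses):

    import java.io.IOException;

    import io.confluent.ksql.cli.console.CliSpecificCommand;
    import io.confluent.ksql.cli.console.Console;
    import io.confluent.ksql.rest.client.KsqlRestClient;

    final class PingCommand implements CliSpecificCommand {
      private final Console terminal;
      private final KsqlRestClient restClient;

      PingCommand(Console terminal, KsqlRestClient restClient) {
        this.terminal = terminal;
        this.restClient = restClient;
      }

      @Override
      public String getName() {
        return "ping";
      }

      @Override
      public void printHelp() {
        terminal.writer().println("\tping: Check whether the current server is reachable");
      }

      @Override
      public void execute(String commandStrippedLine) throws IOException {
        try {
          restClient.makeRootRequest();
          terminal.writer().println("Server is reachable");
        } catch (Exception exception) {
          terminal.writer().println("Server is not reachable: " + exception.getMessage());
        }
      }
    }

    // Registration mirrors the anonymous class above:
    // terminal.registerCliSpecificCommand(new PingCommand(terminal, restClient));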
diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/StandaloneExecutor.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/StandaloneExecutor.java
new file mode 100644
index 000000000000..9df578098e91
--- /dev/null
+++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/StandaloneExecutor.java
@@ -0,0 +1,55 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.cli;
+
+
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.util.KafkaTopicClientImpl;
+import io.confluent.ksql.util.KsqlConfig;
+import io.confluent.ksql.util.Pair;
+import io.confluent.ksql.util.PersistentQueryMetadata;
+import io.confluent.ksql.util.QueryMetadata;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import io.confluent.ksql.KsqlEngine;
+import io.confluent.ksql.parser.tree.Statement;
+
+public class StandaloneExecutor {
+
+  private static final Logger log = LoggerFactory.getLogger(StandaloneExecutor.class);
+
+  KsqlEngine ksqlEngine;
+
+  public StandaloneExecutor(Map<String, Object> streamProperties) {
+    KsqlConfig ksqlConfig = new KsqlConfig(streamProperties);
+    ksqlEngine = new KsqlEngine(ksqlConfig, new KafkaTopicClientImpl(ksqlConfig));
+  }
+
+  public void executeStatements(String queries) throws Exception {
+    MetaStore tempMetaStore = ksqlEngine.getMetaStore().clone();
+    List<Pair<String, Statement>> queryList = ksqlEngine.parseQueries(queries,
+        Collections.emptyMap(),
+        tempMetaStore);
+    List<QueryMetadata> queryMetadataList = ksqlEngine.planQueries(
+        false, queryList, Collections.emptyMap(), tempMetaStore);
+    for (QueryMetadata queryMetadata: queryMetadataList) {
+      if (queryMetadata instanceof PersistentQueryMetadata) {
+        PersistentQueryMetadata persistentQueryMetadata = (PersistentQueryMetadata) queryMetadata;
+        persistentQueryMetadata.getKafkaStreams().start();
+      } else {
+        System.err.println("Ignoring statement: " + queryMetadata.getStatementString());
+        System.err.println("Only CREATE statements can run in KSQL embedded mode.");
+        log.warn("Ignoring statement: {}", queryMetadata.getStatementString());
+        log.warn("Only CREATE statements can run in KSQL embedded mode.");
+      }
+    }
+  }
+
+}
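A minimal sketch of driving StandaloneExecutor directly; the broker address, application id, and the CREATE statement are placeholders:

    import java.util.HashMap;
    import java.util.Map;

    import io.confluent.ksql.cli.StandaloneExecutor;

    public final class StandaloneSketch {
      public static void main(String[] args) throws Exception {
        Map<String, Object> props = new HashMap<>();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("application.id", "ksql_standalone_example");

        // The statement text would normally come from CliUtils.readQueryFile(...).
        new StandaloneExecutor(props).executeStatements(
            "CREATE STREAM pageviews_copy AS SELECT * FROM pageviews;");
      }
    }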
diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/AbstractCliCommands.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/AbstractCliCommands.java
new file mode 100644
index 000000000000..dc0e442ff128
--- /dev/null
+++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/AbstractCliCommands.java
@@ -0,0 +1,77 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.cli.commands;
+
+import com.github.rvesse.airline.annotations.Option;
+import com.github.rvesse.airline.annotations.restrictions.ranges.LongRange;
+import com.github.rvesse.airline.parser.errors.ParseException;
+import io.confluent.ksql.cli.Cli;
+import io.confluent.ksql.cli.console.OutputFormat;
+
+public abstract class AbstractCliCommands implements Runnable {
+
+  private static final String NON_INTERACTIVE_TEXT_OPTION_NAME = "--exec";
+  private static final String STREAMED_QUERY_ROW_LIMIT_OPTION_NAME = "--query-row-limit";
+  private static final String STREAMED_QUERY_TIMEOUT_OPTION_NAME = "--query-timeout";
+
+  private static final String OUTPUT_FORMAT_OPTION_NAME = "--output";
+
+  @Option(
+      name = NON_INTERACTIVE_TEXT_OPTION_NAME,
+      description = "Text to run non-interactively, exiting immediately after"
+  )
+  String nonInteractiveText;
+
+  @Option(
+      name = STREAMED_QUERY_ROW_LIMIT_OPTION_NAME,
+      description = "An optional maximum number of rows to read from streamed queries"
+  )
+  @LongRange(
+      min = 1
+  )
+  Long streamedQueryRowLimit;
+
+  @Option(
+      name = STREAMED_QUERY_TIMEOUT_OPTION_NAME,
+      description = "An optional time limit (in milliseconds) for streamed queries"
+  )
+  @LongRange(
+      min = 1
+  )
+  Long streamedQueryTimeoutMs;
+
+  @Option(
+      name = OUTPUT_FORMAT_OPTION_NAME,
+      description = "The output format to use "
+          + "(either 'JSON' or 'TABULAR'; can be changed within the REPL as well; "
+          + "defaults to TABULAR)"
+  )
+  String outputFormat = OutputFormat.TABULAR.name();
+
+  @Override
+  public void run() {
+    try (Cli cli = getCli()) {
+      if (nonInteractiveText != null) {
+        cli.runNonInteractively(nonInteractiveText);
+      } else {
+        cli.runInteractively();
+      }
+    } catch (Exception exception) {
+      throw new RuntimeException(exception);
+    }
+  }
+
+  protected abstract Cli getCli() throws Exception;
+
+  protected OutputFormat parseOutputFormat() {
+    try {
+      return OutputFormat.valueOf(outputFormat.toUpperCase());
+    } catch (IllegalArgumentException exception) {
+      throw new ParseException(String.format("Invalid output format: '%s'", outputFormat));
+    }
+  }
+
+}
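These annotated commands are presumably assembled with the usual rvesse/airline builder; a sketch of that wiring under the assumption that it lives in the same package (the project's real entry point, Ksql.java, is elsewhere in this patch and may differ):

    package io.confluent.ksql.cli.commands;

    import com.github.rvesse.airline.Cli;
    import com.github.rvesse.airline.builder.CliBuilder;

    public final class CliEntryPointSketch {
      public static void main(String[] args) {
        CliBuilder<Runnable> builder = Cli.<Runnable>builder("ksql");
        builder.withCommands(Local.class, Remote.class, Standalone.class);
        builder.build().parse(args).run(); // dispatches to AbstractCliCommands.run()
      }
    }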
diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Local.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Local.java
new file mode 100644
index 000000000000..5b11ae685dee
--- /dev/null
+++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Local.java
@@ -0,0 +1,156 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.cli.commands;
+
+import com.github.rvesse.airline.annotations.Command;
+import com.github.rvesse.airline.annotations.Option;
+import com.github.rvesse.airline.annotations.restrictions.Port;
+import com.github.rvesse.airline.annotations.restrictions.PortType;
+import io.confluent.ksql.cli.LocalCli;
+import io.confluent.ksql.rest.client.KsqlRestClient;
+import io.confluent.ksql.rest.server.KsqlRestApplication;
+import io.confluent.ksql.rest.server.KsqlRestConfig;
+import io.confluent.ksql.util.CliUtils;
+import io.confluent.ksql.cli.console.Console;
+import io.confluent.ksql.cli.console.JLineTerminal;
+import io.confluent.ksql.util.KsqlConfig;
+
+import org.apache.kafka.streams.StreamsConfig;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Properties;
+
+@Command(name = "local", description = "Run a local (standalone) Cli session")
+public class Local extends AbstractCliCommands {
+
+  private static final String PROPERTIES_FILE_OPTION_NAME = "--properties-file";
+
+  private static final String PORT_NUMBER_OPTION_NAME = "--port-number";
+  private static final int PORT_NUMBER_OPTION_DEFAULT = 9098;
+
+  private static final String KAFKA_BOOTSTRAP_SERVER_OPTION_NAME = "--bootstrap-server";
+  private static final String KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT = "localhost:9092";
+
+  private static final String SERVICE_ID_OPTION_NAME = "--service-id";
+  private static final String SERVICE_ID_OPTION_DEFAULT = KsqlConfig.KSQL_SERVICE_ID_DEFAULT;
+
+  private static final String COMMAND_TOPIC_SUFFIX_OPTION_NAME = "--command-topic-suffix";
+  private static final String COMMAND_TOPIC_SUFFIX_OPTION_DEFAULT = "commands";
+
+  @Port(acceptablePorts = PortType.ANY)
+  @Option(
+      name = PORT_NUMBER_OPTION_NAME,
+      description = "The port number to use for the connection (defaults to "
+          + PORT_NUMBER_OPTION_DEFAULT
+          + ")"
+  )
+  int portNumber = PORT_NUMBER_OPTION_DEFAULT;
+
+  @Option(
+      name = KAFKA_BOOTSTRAP_SERVER_OPTION_NAME,
+      description = "The Kafka server to connect to (defaults to "
+          + KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT
+          + ")"
+  )
+  String bootstrapServer;
+
+  @Option(
+      name = SERVICE_ID_OPTION_NAME,
+      description = "The application ID to use for the created Kafka Streams instance(s) "
+          + "(defaults to '"
+          + SERVICE_ID_OPTION_DEFAULT
+          + "')"
+  )
+  String serviceId;
+
+  @Option(
+      name = COMMAND_TOPIC_SUFFIX_OPTION_NAME,
+      description = "The suffix to append to the end of the name of the command topic "
+          + "(defaults to '"
+          + COMMAND_TOPIC_SUFFIX_OPTION_DEFAULT
+          + "')"
+  )
+  String commandTopicSuffix;
+
+  @Option(
+      name = PROPERTIES_FILE_OPTION_NAME,
+      description = "A file specifying properties for Ksql and its underlying Kafka Streams "
+          + "instance(s) (can specify port number, bootstrap server, etc.
but these options will " + + "be overridden if also given via flags)" + ) + String propertiesFile; + + @Override + public LocalCli getCli() throws Exception { + Properties serverProperties; + try { + serverProperties = getStandaloneProperties(); + } catch (IOException exception) { + throw new RuntimeException(exception); + } + + // Have to override listeners config to make sure it aligns with port number for client + serverProperties.put(KsqlRestConfig.LISTENERS_CONFIG, CliUtils.getLocalServerAddress(portNumber)); + KsqlRestConfig restServerConfig = new KsqlRestConfig(serverProperties); + KsqlRestApplication restServer = KsqlRestApplication.buildApplication(restServerConfig, false); + restServer.start(); + + KsqlRestClient restClient = new KsqlRestClient(CliUtils.getLocalServerAddress(portNumber)); + Console terminal = new JLineTerminal(parseOutputFormat(), restClient); + + return new LocalCli( + streamedQueryRowLimit, + streamedQueryTimeoutMs, + restClient, + terminal, + restServer + ); + } + + private Properties getStandaloneProperties() throws IOException { + Properties properties = new Properties(); + addDefaultProperties(properties); + addFileProperties(properties); + addFlagProperties(properties); + return properties; + } + + private void addDefaultProperties(Properties properties) { + properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT); + properties.put(KsqlConfig.KSQL_SERVICE_ID_CONFIG, SERVICE_ID_OPTION_DEFAULT); + properties.put( + KsqlRestConfig.COMMAND_TOPIC_SUFFIX_CONFIG, + COMMAND_TOPIC_SUFFIX_OPTION_DEFAULT + ); + properties.put(StreamsConfig.APPLICATION_ID_CONFIG, KsqlConfig.KSQL_SERVICE_ID_DEFAULT); + } + + private void addFileProperties(Properties properties) throws IOException { + if (propertiesFile != null) { + properties.load(new FileInputStream(propertiesFile)); + if (properties.containsKey(KsqlConfig.KSQL_SERVICE_ID_CONFIG)) { + properties + .put(StreamsConfig.APPLICATION_ID_CONFIG, + properties.getProperty(KsqlConfig.KSQL_SERVICE_ID_CONFIG)); + } else { + properties.put(StreamsConfig.APPLICATION_ID_CONFIG, KsqlConfig.KSQL_SERVICE_ID_DEFAULT); + } + } + } + + private void addFlagProperties(Properties properties) { + if (bootstrapServer != null) { + properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer); + } + if (serviceId != null) { + properties.put(StreamsConfig.APPLICATION_ID_CONFIG, serviceId); + properties.put(KsqlConfig.KSQL_SERVICE_ID_CONFIG, serviceId); + } + if (commandTopicSuffix != null) { + properties.put(KsqlRestConfig.COMMAND_TOPIC_SUFFIX_CONFIG, commandTopicSuffix); + } + } +} diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Remote.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Remote.java new file mode 100644 index 000000000000..f6d9ebd3031c --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Remote.java @@ -0,0 +1,87 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.cli.commands; + +import com.github.rvesse.airline.annotations.Arguments; +import com.github.rvesse.airline.annotations.Command; +import com.github.rvesse.airline.annotations.Option; +import com.github.rvesse.airline.annotations.restrictions.Once; +import com.github.rvesse.airline.annotations.restrictions.Required; + +import org.apache.kafka.streams.StreamsConfig; + +import io.confluent.ksql.cli.Cli; +import io.confluent.ksql.cli.RemoteCli; +import io.confluent.ksql.rest.client.KsqlRestClient; +import io.confluent.ksql.cli.console.Console; +import io.confluent.ksql.cli.console.JLineTerminal; +import io.confluent.ksql.util.KsqlConfig; + +import java.io.FileInputStream; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +@Command(name = "remote", description = "Connect to a remote (possibly distributed) Ksql session") +public class Remote extends AbstractCliCommands { + + @Once + @Required + @Arguments( + title = "server", + description = "The address of the Ksql server to connect to (ex: http://confluent.io:9098)" + ) + String server; + private static final String PROPERTIES_FILE_OPTION_NAME = "--properties-file"; + + @Option( + name = PROPERTIES_FILE_OPTION_NAME, + description = "A file specifying properties for Ksql and its underlying Kafka Streams " + + "instance(s) (can specify port number, bootstrap server, etc. " + + "but these options will " + + "be overridden if also given via flags)" + ) + String propertiesFile; + + @Override + public Cli getCli() throws Exception { + Map propertiesMap = new HashMap<>(); + Properties properties = getStandaloneProperties(); + for (String key: properties.stringPropertyNames()) { + propertiesMap.put(key, properties.getProperty(key)); + } + + KsqlRestClient restClient = new KsqlRestClient(server, propertiesMap); + Console terminal = new JLineTerminal(parseOutputFormat(), restClient); + + return new RemoteCli( + streamedQueryRowLimit, + streamedQueryTimeoutMs, + restClient, + terminal + ); + } + + private Properties getStandaloneProperties() throws IOException { + Properties properties = new Properties(); + properties.put(StreamsConfig.APPLICATION_ID_CONFIG, KsqlConfig.KSQL_SERVICE_ID_DEFAULT); + addFileProperties(properties); + return properties; + } + + private void addFileProperties(Properties properties) throws IOException { + if (propertiesFile != null) { + properties.load(new FileInputStream(propertiesFile)); + if (properties.containsKey(KsqlConfig.KSQL_SERVICE_ID_CONFIG)) { + properties + .put(StreamsConfig.APPLICATION_ID_CONFIG, + properties.getProperty(KsqlConfig.KSQL_SERVICE_ID_CONFIG)); + } else { + properties.put(StreamsConfig.APPLICATION_ID_CONFIG, KsqlConfig.KSQL_SERVICE_ID_DEFAULT); + } + } + } +} diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Standalone.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Standalone.java new file mode 100644 index 000000000000..d3fc0dfcbf09 --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/commands/Standalone.java @@ -0,0 +1,94 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.cli.commands;
+
+import com.github.rvesse.airline.annotations.Arguments;
+import com.github.rvesse.airline.annotations.Command;
+import com.github.rvesse.airline.annotations.Option;
+import com.github.rvesse.airline.annotations.restrictions.Once;
+import com.github.rvesse.airline.annotations.restrictions.Required;
+import io.confluent.ksql.cli.Cli;
+import io.confluent.ksql.util.CliUtils;
+import io.confluent.ksql.cli.StandaloneExecutor;
+import io.confluent.ksql.util.KsqlConfig;
+
+import org.apache.kafka.streams.StreamsConfig;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Properties;
+
+@Command(name = "standalone", description = "Run KSQL statements from a file.")
+public class Standalone extends AbstractCliCommands {
+
+  private static final String PROPERTIES_FILE_OPTION_NAME = "--properties-file";
+
+  private static final String KAFKA_BOOTSTRAP_SERVER_OPTION_NAME = "--bootstrap-server";
+  private static final String KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT = "localhost:9092";
+
+  private static final String APPLICATION_ID_OPTION_NAME = "--application-id";
+  private static final String APPLICATION_ID_OPTION_DEFAULT = "ksql_standalone_cli";
+
+  @Option(
+      name = PROPERTIES_FILE_OPTION_NAME,
+      description = "A file specifying properties for Ksql and its underlying Kafka Streams "
+          + "instance(s) (can specify port number, bootstrap server, etc. "
+          + "but these options will "
+          + "be overridden if also given via flags)"
+  )
+  String propertiesFile;
+
+  @Once
+  @Required
+  @Arguments(
+      title = "query-file",
+      description = "Path to the query file on the local machine."
+  )
+  String queryFile;
+
+  @Override
+  protected Cli getCli() throws Exception {
+    // Unused: run() is overridden below and drives a StandaloneExecutor instead of a Cli.
+    return null;
+  }
+
+  @Override
+  public void run() {
+    try {
+      CliUtils cliUtils = new CliUtils();
+      String queries = cliUtils.readQueryFile(queryFile);
+      StandaloneExecutor standaloneExecutor = new StandaloneExecutor(getStandaloneProperties());
+      standaloneExecutor.executeStatements(queries);
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
+
+  private Properties getStandaloneProperties() throws IOException {
+    Properties properties = new Properties();
+    addDefaultProperties(properties);
+    addFileProperties(properties);
+    return properties;
+  }
+
+  private void addDefaultProperties(Properties properties) {
+    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT);
+    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, KsqlConfig.KSQL_SERVICE_ID_DEFAULT);
+  }
+
+  private void addFileProperties(Properties properties) throws IOException {
+    if (propertiesFile != null) {
+      properties.load(new FileInputStream(propertiesFile));
+      if (properties.containsKey(KsqlConfig.KSQL_SERVICE_ID_CONFIG)) {
+        properties
+            .put(StreamsConfig.APPLICATION_ID_CONFIG,
+                properties.getProperty(KsqlConfig.KSQL_SERVICE_ID_CONFIG));
+      } else {
+        properties.put(StreamsConfig.APPLICATION_ID_CONFIG, KsqlConfig.KSQL_SERVICE_ID_DEFAULT);
+      }
+    }
+  }
+
+}
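Standalone hands the file contents to StandaloneExecutor via CliUtils.readQueryFile (defined later in this patch). An NIO sketch of the same read, differing only in that readQueryFile rebuilds each line ending with the platform separator:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    final class ReadQueriesSketch {
      static String readQueries(String path) throws IOException {
        // Whole-file read; readQueryFile instead appends System.lineSeparator() per line.
        return new String(Files.readAllBytes(Paths.get(path)));
      }
    }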
diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/CliSpecificCommand.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/CliSpecificCommand.java
new file mode 100644
index 000000000000..d8a620fad7b2
--- /dev/null
+++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/CliSpecificCommand.java
@@ -0,0 +1,15 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.cli.console;
+
+import java.io.IOException;
+
+public interface CliSpecificCommand {
+  String getName();
+
+  void printHelp();
+
+  void execute(String commandStrippedLine) throws IOException;
+}
diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java
new file mode 100644
index 000000000000..11383ff8abf2
--- /dev/null
+++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java
@@ -0,0 +1,520 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.cli.console;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.rest.client.KsqlRestClient;
+import io.confluent.ksql.rest.entity.CommandStatus;
+import io.confluent.ksql.rest.entity.CommandStatusEntity;
+import io.confluent.ksql.rest.entity.ErrorMessage;
+import io.confluent.ksql.rest.entity.ErrorMessageEntity;
+import io.confluent.ksql.rest.entity.ExecutionPlan;
+import io.confluent.ksql.rest.entity.KafkaTopicInfo;
+import io.confluent.ksql.rest.entity.KafkaTopicsList;
+import io.confluent.ksql.rest.entity.KsqlEntity;
+import io.confluent.ksql.rest.entity.KsqlEntityList;
+import io.confluent.ksql.rest.entity.KsqlTopicInfo;
+import io.confluent.ksql.rest.entity.KsqlTopicsList;
+import io.confluent.ksql.rest.entity.PropertiesList;
+import io.confluent.ksql.rest.entity.Queries;
+import io.confluent.ksql.rest.entity.SchemaMapper;
+import io.confluent.ksql.rest.entity.ServerInfo;
+import io.confluent.ksql.rest.entity.SourceDescription;
+import io.confluent.ksql.rest.entity.StreamedRow;
+import io.confluent.ksql.rest.entity.StreamsList;
+import io.confluent.ksql.rest.entity.TablesList;
+import io.confluent.ksql.rest.entity.TopicDescription;
+import io.confluent.ksql.util.CliUtils;
+
+import org.jline.reader.EndOfFileException;
+import org.jline.reader.History;
+import org.jline.terminal.Terminal;
+import org.jline.utils.InfoCmp;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+public abstract class Console implements Closeable {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(Console.class);
+
+  private LineReader lineReader;
+  private final ObjectMapper objectMapper;
+  private final KsqlRestClient restClient;
+  private final LinkedHashMap<String, CliSpecificCommand> cliSpecificCommands;
+
+  private OutputFormat outputFormat;
+
+  public Console(OutputFormat outputFormat, KsqlRestClient restClient) {
+    Objects.requireNonNull(outputFormat, "Must provide the terminal with an initial output format");
+    Objects.requireNonNull(restClient, "Must provide the terminal with a REST client");
+
+    this.outputFormat = outputFormat;
+    this.restClient = restClient;
+
+    this.cliSpecificCommands = new LinkedHashMap<>();
+
+    this.objectMapper = new ObjectMapper().disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);
+    new SchemaMapper().registerToObjectMapper(objectMapper);
+
+    registerDefaultCommands();
+  }
+
+  public abstract PrintWriter writer();
+
+  public abstract void flush();
+
+  public abstract int getWidth();
+
+  /* jline specific */
+
+  protected abstract LineReader buildLineReader();
+
+  protected abstract void puts(InfoCmp.Capability capability);
+
+  public abstract Terminal.SignalHandler handle(Terminal.Signal signal, Terminal.SignalHandler signalHandler);
+
+  /* public */
+
+  public void addResult(GenericRow row) {
+    // do nothing by default, test classes can use this method to obtain typed results
+  }
+
+  public void addResult(List<String> columnHeaders, List<List<String>> rowValues) {
+    // do nothing by default, test classes can use this method to obtain typed results
+  }
+
+  public LinkedHashMap<String, CliSpecificCommand> getCliSpecificCommands() {
+    return cliSpecificCommands;
+  }
+
+  public LineReader getLineReader() {
+    if (lineReader == null) {
+      lineReader = buildLineReader();
+    }
+    return lineReader;
+  }
+
+  public void printErrorMessage(ErrorMessage errorMessage) {
+    printError(errorMessage.getMessage(), errorMessage.toString());
+  }
+
+  public void printError(String shortMsg, String fullMsg) {
+    LOGGER.error(fullMsg);
+    writer().println(shortMsg);
+  }
+
+  public void printStreamedRow(StreamedRow row) throws IOException {
+    if (row.getErrorMessage() != null) {
+      printErrorMessage(row.getErrorMessage());
+    } else {
+      switch (outputFormat) {
+        case JSON:
+          printAsJson(row.getRow().getColumns());
+          break;
+        case TABULAR:
+          printAsTable(row.getRow());
+          break;
+        default:
+          throw new RuntimeException(String.format(
+              "Unexpected output format: '%s'",
+              outputFormat.name()
+          ));
+      }
+    }
+  }
+
+  public void printKsqlEntityList(List<KsqlEntity> entityList) throws IOException {
+    switch (outputFormat) {
+      case JSON:
+        printAsJson(entityList);
+        break;
+      case TABULAR:
+        for (KsqlEntity ksqlEntity : entityList) {
+          writer().println();
+          printAsTable(ksqlEntity);
+        }
+        break;
+      default:
+        throw new RuntimeException(String.format(
+            "Unexpected output format: '%s'",
+            outputFormat.name()
+        ));
+    }
+  }
+
+  public void registerCliSpecificCommand(CliSpecificCommand cliSpecificCommand) {
+    cliSpecificCommands.put(cliSpecificCommand.getName(), cliSpecificCommand);
+  }
+
+  public void setOutputFormat(String newFormat) {
+    try {
+      outputFormat = OutputFormat.get(newFormat);
+      writer().printf("Output format set to %s%n", outputFormat.name());
+    } catch (IllegalArgumentException exception) {
+      writer().printf(
+          "Invalid output format: '%s' (valid formats: %s)%n",
+          newFormat,
+          OutputFormat.VALID_FORMATS
+      );
+    }
+  }
+
+  /* private */
+
+  private void registerDefaultCommands() {
+    registerCliSpecificCommand(new CliSpecificCommand() {
+      @Override
+      public String getName() {
+        return "help";
+      }
+
+      @Override
+      public void printHelp() {
+        writer().println("help:");
+        writer().println("\tShow this message.");
+      }
+
+      @Override
+      public void execute(String line) {
+        writer().println();
+        writer().println("Description:");
+        writer().println("\tThe KSQL CLI provides a terminal-based interactive shell for running queries. "
+            + "Each command must be on a separate line. "
+            + "For KSQL command syntax, see the documentation at https://github.com/confluentinc/ksql/docs/.");
+        writer().println();
+        for (CliSpecificCommand cliSpecificCommand : cliSpecificCommands.values()) {
+          cliSpecificCommand.printHelp();
+          writer().println();
+        }
+        writer().println();
+        writer().println("Default behavior:");
+        writer().println();
+        writer().println("    Lines are read one at a time and are sent to the server as "
+            + "KSQL unless one of the following is true:"
+        );
+        writer().println();
+        writer().println("    1. The line is empty or entirely whitespace. In this case, "
+            + "no request is made to the server."
+        );
+        writer().println();
+        writer().println("    2. The line ends with backslash ('\\'). In this case, lines are "
+            + "continuously read and stripped of their trailing newline and '\\' until one is "
+            + "encountered that does not end with '\\'; then, the concatenation of all lines read "
+            + "during this time is sent to the server as KSQL."
+        );
+        writer().println();
+      }
+    });
+
+    registerCliSpecificCommand(new CliSpecificCommand() {
+      @Override
+      public String getName() {
+        return "clear";
+      }
+
+      @Override
+      public void printHelp() {
+        writer().println("clear:");
+        writer().println("\tClear the current terminal.");
+      }
+
+      @Override
+      public void execute(String commandStrippedLine) throws IOException {
+        puts(InfoCmp.Capability.clear_screen);
+        flush();
+      }
+    });
+
+    registerCliSpecificCommand(new CliSpecificCommand() {
+
+      @Override
+      public String getName() {
+        return "output";
+      }
+
+      @Override
+      public void printHelp() {
+        writer().println("output:");
+        writer().println("\tView the current output format.");
+        writer().println("");
+        writer().printf(
+            "output <format>:",
+            OutputFormat.VALID_FORMATS
+        );
+        writer().println("");
+        writer().printf(
+            "\tSet the output format to <format> (valid formats: %s)%n",
+            OutputFormat.VALID_FORMATS
+        );
+        writer().println("\tFor example: \"output JSON\"");
+      }
+
+      @Override
+      public void execute(String commandStrippedLine) throws IOException {
+        String newFormat = commandStrippedLine.trim().toUpperCase();
+        if (newFormat.isEmpty()) {
+          writer().printf("Current output format: %s%n", outputFormat.name());
+        } else {
+          setOutputFormat(newFormat);
+        }
+      }
+    });
+
+    registerCliSpecificCommand(new CliSpecificCommand() {
+      @Override
+      public String getName() {
+        return "history";
+      }
+
+      @Override
+      public void printHelp() {
+        writer().println("history:");
+        writer().println(
+            "\tShow previous lines entered during the current CLI session. You can also use "
+            + "the up and down arrow keys to navigate previous lines."
+        );
+      }
+
+      @Override
+      public void execute(String commandStrippedLine) throws IOException {
+        for (History.Entry historyEntry : lineReader.getHistory()) {
+          writer().printf("%4d: %s%n", historyEntry.index(), historyEntry.line());
+        }
+        flush();
+      }
+    });
+
+    registerCliSpecificCommand(new CliSpecificCommand() {
+      @Override
+      public String getName() {
+        return "version";
+      }
+
+      @Override
+      public void printHelp() {
+        writer().println("version:");
+        writer().println("\tGet the current KSQL version.");
+      }
+
+      @Override
+      public void execute(String commandStrippedLine) throws IOException {
+        ServerInfo serverInfo = restClient.makeRootRequest().getResponse();
+        writer().printf("Version: %s%n", serverInfo.getVersion());
+        flush();
+      }
+    });
+
+    registerCliSpecificCommand(new CliSpecificCommand() {
+      @Override
+      public String getName() {
+        return "exit";
+      }
+
+      @Override
+      public void printHelp() {
+        writer().println("exit:");
+        writer().println(
+            "\tExit the CLI."
+ ); + } + + @Override + public void execute(String commandStrippedLine) throws IOException { + throw new EndOfFileException(); + } + }); + } + + + private void printTable(List columnHeaders, List> rowValues) { + if (columnHeaders.size() == 0) { + throw new RuntimeException("Cannot print table without columns"); + } + + addResult(columnHeaders, rowValues); + + Integer[] columnLengths = new Integer[columnHeaders.size()]; + int separatorLength = -1; + + for (int i = 0; i < columnLengths.length; i++) { + int columnLength = columnHeaders.get(i).length(); + for (List row : rowValues) { + columnLength = Math.max(columnLength, row.get(i).length()); + } + columnLengths[i] = columnLength; + separatorLength += columnLength + 3; + } + + String rowFormatString = constructRowFormatString(columnLengths); + + writer().printf(rowFormatString, columnHeaders.toArray()); + + writer().println(new String(new char[separatorLength]).replaceAll(".", "-")); + for (List row : rowValues) { + writer().printf(rowFormatString, row.toArray()); + } + + flush(); + } + + private void printAsTable(KsqlEntity ksqlEntity) { + List columnHeaders; + List> rowValues; + if (ksqlEntity instanceof CommandStatusEntity) { + CommandStatusEntity commandStatusEntity = (CommandStatusEntity) ksqlEntity; + columnHeaders = Arrays.asList("Message"); + CommandStatus commandStatus = commandStatusEntity.getCommandStatus(); + rowValues = Collections.singletonList(Arrays.asList( + commandStatus.getMessage().split("\n", 2)[0] + )); + } else if (ksqlEntity instanceof ErrorMessageEntity) { + ErrorMessage errorMessage = ((ErrorMessageEntity) ksqlEntity).getErrorMessage(); + printErrorMessage(errorMessage); + return; + } else if (ksqlEntity instanceof PropertiesList) { + PropertiesList propertiesList = CliUtils.propertiesListWithOverrides((PropertiesList) ksqlEntity, restClient.getLocalProperties()); + Map properties = propertiesList.getProperties(); + columnHeaders = Arrays.asList("Property", "Value"); + rowValues = properties.entrySet().stream() + .map(propertyEntry -> Arrays.asList( + propertyEntry.getKey(), + Objects.toString(propertyEntry.getValue()) + )).collect(Collectors.toList()); + } else if (ksqlEntity instanceof Queries) { + List runningQueries = ((Queries) ksqlEntity).getQueries(); + columnHeaders = Arrays.asList("Query ID", "Kafka Topic", "Query String"); + rowValues = runningQueries.stream() + .map(runningQuery -> Arrays.asList( + Long.toString(runningQuery.getId()), + runningQuery.getKafkaTopic(), + runningQuery.getQueryString() + )).collect(Collectors.toList()); + } else if (ksqlEntity instanceof SourceDescription) { + List fields = ((SourceDescription) ksqlEntity).getSchema(); + columnHeaders = Arrays.asList("Field", "Type"); + rowValues = fields.stream() + .map(field -> Arrays.asList(field.getName(), field.getType())) + .collect(Collectors.toList()); + } else if (ksqlEntity instanceof TopicDescription) { + columnHeaders = new ArrayList<>(); + columnHeaders.add("Topic Name"); + columnHeaders.add("Kafka Topic"); + columnHeaders.add("Type"); + List topicInfo = new ArrayList<>(); + TopicDescription topicDescription = (TopicDescription) ksqlEntity; + topicInfo.add(topicDescription.getName()); + topicInfo.add(topicDescription.getKafkaTopic()); + topicInfo.add(topicDescription.getFormat()); + if (topicDescription.getFormat().equalsIgnoreCase("AVRO")) { + columnHeaders.add("AvroSchema"); + topicInfo.add(topicDescription.getSchemaString()); + } + rowValues = Arrays.asList(topicInfo); + } else if (ksqlEntity instanceof StreamsList) { + List 
streamInfos = ((StreamsList) ksqlEntity).getStreams(); + columnHeaders = Arrays.asList("Stream Name", "Kafka Topic", "Format"); + rowValues = streamInfos.stream() + .map(streamInfo -> Arrays.asList(streamInfo.getName(), streamInfo.getTopic(), + streamInfo.getFormat())) + .collect(Collectors.toList()); + } else if (ksqlEntity instanceof TablesList) { + List tableInfos = ((TablesList) ksqlEntity).getTables(); + columnHeaders = Arrays.asList("Table Name", "Kafka Topic", "Format", "Windowed"); + rowValues = tableInfos.stream() + .map(tableInfo -> Arrays.asList( + tableInfo.getName(), + tableInfo.getTopic(), + tableInfo.getFormat(), + Boolean.toString(tableInfo.getIsWindowed())) + ).collect(Collectors.toList()); + } else if (ksqlEntity instanceof KsqlTopicsList) { + List topicInfos = ((KsqlTopicsList) ksqlEntity).getTopics(); + columnHeaders = Arrays.asList("Ksql Topic", "Kafka Topic", "Format"); + rowValues = topicInfos.stream() + .map(topicInfo -> Arrays.asList( + topicInfo.getName(), + topicInfo.getKafkaTopic(), + topicInfo.getFormat().name() + )).collect(Collectors.toList()); + } else if (ksqlEntity instanceof KafkaTopicsList) { + List topicInfos = ((KafkaTopicsList) ksqlEntity).getTopics(); + columnHeaders = Arrays.asList("Kafka Topic", "Registered", "Partitions", "Partition Replicas"); + rowValues = topicInfos.stream() + .map(topicInfo -> Arrays.asList( + topicInfo.getName(), + topicInfo.getRegistered(), + topicInfo.getPartitionCount(), + topicInfo.getReplicaInfo() + )).collect(Collectors.toList()); + } else if (ksqlEntity instanceof ExecutionPlan) { + ExecutionPlan executionPlan = (ExecutionPlan) ksqlEntity; + columnHeaders = Arrays.asList("Execution Plan"); + rowValues = Collections.singletonList(Arrays.asList( + executionPlan.getExecutionPlan() + )); + } else { + throw new RuntimeException(String.format( + "Unexpected KsqlEntity class: '%s'", + ksqlEntity.getClass().getCanonicalName() + )); + } + printTable(columnHeaders, rowValues); + } + + private void printAsTable(GenericRow row) { + addResult(row); + writer().println( + String.join(" | ", row.getColumns().stream().map(Objects::toString).collect(Collectors.toList())) + ); + flush(); + } + + private void printAsJson(Object o) throws IOException { + if (o instanceof PropertiesList) { + o = CliUtils.propertiesListWithOverrides((PropertiesList) o, restClient.getLocalProperties()); + } else if (o instanceof KsqlEntityList) { + List newEntities = new ArrayList<>(); + for (KsqlEntity ksqlEntity : (KsqlEntityList) o) { + if (ksqlEntity instanceof PropertiesList) { + ksqlEntity = CliUtils.propertiesListWithOverrides((PropertiesList) ksqlEntity, restClient.getLocalProperties()); + } + newEntities.add(ksqlEntity); + } + o = newEntities; + } else { + LOGGER.warn(String.format("Unexpected result class: '%s' found in printAsJson", o.getClass().getCanonicalName())); + } + objectMapper.writerWithDefaultPrettyPrinter().writeValue(writer(), o); + writer().println(); + flush(); + } + + private static String constructRowFormatString(Integer... 
lengths) { + List columnFormatStrings = Arrays.stream(lengths) + .map(Console::constructSingleColumnFormatString) + .collect(Collectors.toList()); + return String.format(" %s %n", String.join(" | ", columnFormatStrings)); + } + + private static String constructSingleColumnFormatString(Integer length) { + return String.format("%%%ds", (-1*length)); + } + +} \ No newline at end of file diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/JLineReader.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/JLineReader.java new file mode 100644 index 000000000000..31c4d536756e --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/JLineReader.java @@ -0,0 +1,90 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.cli.console; + +import io.confluent.ksql.util.CliUtils; +import org.jline.reader.Expander; +import org.jline.reader.History; +import org.jline.reader.LineReader; +import org.jline.reader.LineReaderBuilder; +import org.jline.reader.impl.DefaultExpander; +import org.jline.reader.impl.DefaultParser; +import org.jline.reader.impl.history.DefaultHistory; +import org.jline.terminal.Terminal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; + +public class JLineReader implements io.confluent.ksql.cli.console.LineReader { + + private static final Logger LOGGER = LoggerFactory.getLogger(JLineReader.class); + + private static final String DEFAULT_PROMPT = "ksql> "; + + private final DefaultHistory history; + + private final LineReader lineReader; + private final String prompt; + + // Have to enable event expansion or multi-line parsing won't work, so a quick 'n dirty workaround + // will have to do to prevent strings like !! 
from being expanded by the line reader + private static class NoOpExpander extends DefaultExpander { + @Override + public String expandHistory(History history, String line) { + return line; + } + } + + public JLineReader(Terminal terminal) { + Expander expander = new NoOpExpander(); + + // The combination of parser/expander here allow for multiple-line commands connected by '\\' + DefaultParser parser = new DefaultParser(); + parser.setEofOnEscapedNewLine(true); + parser.setQuoteChars(new char[0]); + parser.setEscapeChars(new char[] {'\\'}); + + // TODO: specify a completer to use here via a call to LineReaderBuilder.completer() + this.lineReader = LineReaderBuilder.builder() + .appName("KSQL") + .expander(expander) + .parser(parser) + .terminal(terminal) + .build(); + + this.lineReader.setOpt(LineReader.Option.HISTORY_IGNORE_DUPS); + this.lineReader.setOpt(LineReader.Option.HISTORY_IGNORE_SPACE); + + Path historyFilePath = Paths.get(System.getProperty("history-file", System.getProperty("user.home") + "/.ksql-history")).toAbsolutePath(); + if (CliUtils.createFile(historyFilePath)) { + this.lineReader.setVariable(LineReader.HISTORY_FILE, historyFilePath); + LOGGER.info("Command history saved at: " + historyFilePath); + } else { + terminal.writer().println(String.format("WARNING: Unable to create command history file '%s', command history will not be saved.", historyFilePath)); + } + + this.lineReader.unsetOpt(LineReader.Option.HISTORY_INCREMENTAL); + this.history = new DefaultHistory(this.lineReader); + + this.prompt = DEFAULT_PROMPT; + } + + @Override + public Iterable getHistory() { + return lineReader.getHistory(); + } + + @Override + public String readLine() throws IOException { + String line = lineReader.readLine(prompt); + history.add(line); + history.save(); + return line; + } + +} diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/JLineTerminal.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/JLineTerminal.java new file mode 100644 index 000000000000..7e4cec015576 --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/JLineTerminal.java @@ -0,0 +1,68 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.cli.console; + +import io.confluent.ksql.rest.client.KsqlRestClient; +import org.jline.terminal.Terminal; +import org.jline.terminal.TerminalBuilder; +import org.jline.utils.InfoCmp; + +import java.io.IOException; +import java.io.PrintWriter; + +public class JLineTerminal extends Console { + + private final org.jline.terminal.Terminal terminal; + + public JLineTerminal(OutputFormat outputFormat, KsqlRestClient restClient) { + super(outputFormat, restClient); + + try { + terminal = TerminalBuilder.builder().system(true).build(); + } catch (IOException e) { + throw new RuntimeException("JLineTerminal failed to start!", e); + } + // Ignore ^C when not reading a line + terminal.handle(org.jline.terminal.Terminal.Signal.INT, org.jline.terminal.Terminal.SignalHandler.SIG_IGN); + } + + @Override + public PrintWriter writer() { + return terminal.writer(); + } + + @Override + public void flush() { + terminal.flush(); + } + + @Override + public int getWidth() { + return terminal.getWidth(); + } + + @Override + public void close() throws IOException { + terminal.close(); + } + + /* jline specific */ + + @Override + protected JLineReader buildLineReader() { + return new JLineReader(this.terminal); + } + + @Override + public void puts(InfoCmp.Capability capability) { + terminal.puts(capability); + } + + @Override + public Terminal.SignalHandler handle(Terminal.Signal signal, Terminal.SignalHandler signalHandler) { + return terminal.handle(signal, signalHandler); + } + +} \ No newline at end of file diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/LineReader.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/LineReader.java new file mode 100644 index 000000000000..d01c2146e35a --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/LineReader.java @@ -0,0 +1,15 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.cli.console; + +import org.jline.reader.History; + +import java.io.IOException; + +public interface LineReader { + Iterable getHistory(); + + String readLine() throws IOException; +} diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/OutputFormat.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/OutputFormat.java new file mode 100644 index 000000000000..2abf5d20ef98 --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/OutputFormat.java @@ -0,0 +1,27 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.cli.console; + +import java.util.Arrays; +import java.util.stream.Collectors; + +public enum OutputFormat { + JSON, + TABULAR; + + public static final String VALID_FORMATS = String.format( + "'%s'", + String.join( + "', '", + Arrays.stream(OutputFormat.values()) + .map(Object::toString) + .collect(Collectors.toList()) + ) + ); + + public static OutputFormat get(String format) throws IllegalArgumentException { + return OutputFormat.valueOf(format); + } +} diff --git a/ksql-cli/src/main/java/io/confluent/ksql/util/CliUtils.java b/ksql-cli/src/main/java/io/confluent/ksql/util/CliUtils.java new file mode 100644 index 000000000000..e2e995827f36 --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/util/CliUtils.java @@ -0,0 +1,149 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.util;
+
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import io.confluent.ksql.exception.ExceptionUtil;
+import io.confluent.ksql.rest.entity.PropertiesList;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.ConnectException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Map;
+import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import io.confluent.ksql.ddl.DdlConfig;
+import io.confluent.ksql.parser.AstBuilder;
+import io.confluent.ksql.parser.SqlBaseParser;
+import io.confluent.ksql.parser.tree.RegisterTopic;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CliUtils {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(CliUtils.class);
+
+  public Optional<String> getAvroSchemaIfAvroTopic(SqlBaseParser.RegisterTopicContext
+                                                       registerTopicContext) {
+    AstBuilder astBuilder = new AstBuilder(null);
+    RegisterTopic registerTopic = (RegisterTopic) astBuilder.visitRegisterTopic(registerTopicContext);
+    if (registerTopic.getProperties().get(DdlConfig.VALUE_FORMAT_PROPERTY) == null) {
+      throw new KsqlException("VALUE_FORMAT is not set for the topic.");
+    }
+    if (registerTopic.getProperties().get(DdlConfig.VALUE_FORMAT_PROPERTY).toString()
+        .equalsIgnoreCase("'AVRO'")) {
+      if (registerTopic.getProperties().containsKey(DdlConfig.AVRO_SCHEMA_FILE)) {
+        String avroSchema = getAvroSchema(AstBuilder.unquote(registerTopic.getProperties()
+            .get(DdlConfig.AVRO_SCHEMA_FILE)
+            .toString(), "'"));
+        return Optional.of(avroSchema);
+      } else {
+        throw new KsqlException("You need to provide an Avro schema file path for topics in Avro format.");
+      }
+    }
+    return Optional.empty();
+  }
+
+  public String getAvroSchema(final String schemaFilePath) {
+    try {
+      byte[] jsonData = Files.readAllBytes(Paths.get(schemaFilePath));
+      ObjectMapper objectMapper = new ObjectMapper();
+      JsonNode root = objectMapper.readTree(jsonData);
+      return root.toString();
+    } catch (JsonParseException e) {
+      throw new KsqlException("Could not parse the Avro schema file.");
+    } catch (IOException e) {
+      throw new KsqlException("Could not read the Avro schema file.");
+    }
+  }
+
+  public String readQueryFile(final String queryFilePath) throws IOException {
+    StringBuilder sb = new StringBuilder();
+    BufferedReader br = null;
+    try {
+      br = new BufferedReader(new FileReader(queryFilePath));
+      String line = br.readLine();
+      while (line != null) {
+        sb.append(line);
+        sb.append(System.lineSeparator());
+        line = br.readLine();
+      }
+    } catch (IOException e) {
+      throw new KsqlException("Could not read the query file.");
+    } finally {
+      if (br != null) {
+        br.close();
+      }
+    }
+    return sb.toString();
+  }
+
+  public static String getErrorMessage(Throwable e) {
+    if (e instanceof ConnectException) {
+      return "Could not connect to the server.";
+    } else {
+      return e.getMessage();
+    }
+  }
+
+  public static PropertiesList propertiesListWithOverrides(PropertiesList propertiesList, Map<String, Object> localProperties) {
+    Map<String, Object> properties = propertiesList.getProperties();
+    for (Map.Entry<String, Object> localPropertyEntry : localProperties.entrySet()) {
+      properties.put(
+          "(LOCAL OVERRIDE) " + localPropertyEntry.getKey(),
+          localPropertyEntry.getValue()
+      );
+    }
+    return new PropertiesList(propertiesList.getStatementText(), properties);
+  }
+
+  private static
final Pattern QUOTED_PROMPT_PATTERN = Pattern.compile("'(''|[^'])*'"); + + private String parsePromptString(String commandStrippedLine) { + if (commandStrippedLine.trim().isEmpty()) { + throw new RuntimeException("Prompt command must be followed by a new prompt to use"); + } + + String trimmedLine = commandStrippedLine.trim().replace("%", "%%"); + if (trimmedLine.contains("'")) { + Matcher quotedPromptMatcher = QUOTED_PROMPT_PATTERN.matcher(trimmedLine); + if (quotedPromptMatcher.matches()) { + return trimmedLine.substring(1, trimmedLine.length() - 1).replace("''", "'"); + } else { + throw new RuntimeException( + "Failed to parse prompt string. All non-enclosing single quotes must be doubled." + ); + } + } else { + return trimmedLine; + } + } + + public static String getLocalServerAddress(int portNumber) { + return String.format("http://localhost:%d", portNumber); + } + + public static boolean createFile(Path path) { + try { + Files.createDirectories(path.getParent()); + if (Files.notExists(path)) { + Files.createFile(path); + } + return true; + } catch (Exception e) { + LOGGER.error(ExceptionUtil.stackTraceToString(e)); + return false; + } + } +} diff --git a/ksql-cli/src/main/java/io/confluent/ksql/util/TimestampLogFileAppender.java b/ksql-cli/src/main/java/io/confluent/ksql/util/TimestampLogFileAppender.java new file mode 100644 index 000000000000..003f41d54e0c --- /dev/null +++ b/ksql-cli/src/main/java/io/confluent/ksql/util/TimestampLogFileAppender.java @@ -0,0 +1,23 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.util; + +import java.text.SimpleDateFormat; +import java.util.Date; +import org.apache.log4j.FileAppender; + +public class TimestampLogFileAppender extends FileAppender { + + @Override + public void setFile(String fileName) { + if (fileName.contains("%timestamp")) { + Date d = new Date(); + SimpleDateFormat format = new SimpleDateFormat("yyMMdd-HHmmss"); + fileName = fileName.replaceAll("%timestamp", format.format(d)); + } + super.setFile(fileName); + } + +} \ No newline at end of file diff --git a/ksql-cli/src/main/resources/log4j.properties b/ksql-cli/src/main/resources/log4j.properties new file mode 100644 index 000000000000..7158bbd55664 --- /dev/null +++ b/ksql-cli/src/main/resources/log4j.properties @@ -0,0 +1,11 @@ +# For the general syntax of property based configuration files see +# the documentation of org.apache.log4j.PropertyConfigurator. 
+
+log4j.rootLogger=WARN, default.file
+
+log4j.appender.default.file=io.confluent.ksql.util.TimestampLogFileAppender
+log4j.appender.default.file.ImmediateFlush=true
+log4j.appender.default.file.append=false
+log4j.appender.default.file.file=/tmp/ksql-logs/cli-%timestamp.log
+log4j.appender.default.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.default.file.layout.ConversionPattern=[%d] %p %m (%c:%L)%n
\ No newline at end of file
diff --git a/ksql-cli/src/test/java/io/confluent/ksql/CliTest.java b/ksql-cli/src/test/java/io/confluent/ksql/CliTest.java
new file mode 100644
index 000000000000..95fd7e2a17f1
--- /dev/null
+++ b/ksql-cli/src/test/java/io/confluent/ksql/CliTest.java
@@ -0,0 +1,359 @@
+package io.confluent.ksql;
+
+import io.confluent.ksql.cli.LocalCli;
+import io.confluent.ksql.cli.console.OutputFormat;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.rest.client.KsqlRestClient;
+import io.confluent.ksql.rest.server.KsqlRestApplication;
+import io.confluent.ksql.rest.server.KsqlRestConfig;
+import io.confluent.ksql.testutils.EmbeddedSingleNodeKafkaCluster;
+import io.confluent.ksql.util.CliUtils;
+import io.confluent.ksql.util.OrderDataProvider;
+import io.confluent.ksql.util.TestDataProvider;
+import io.confluent.ksql.util.TopicConsumer;
+import io.confluent.ksql.util.TopicProducer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static io.confluent.ksql.TestResult.*;
+import static io.confluent.ksql.util.KsqlConfig.*;
+import static io.confluent.ksql.util.KsqlTestUtil.assertExpectedResults;
+
+/**
+ * Most tests in CliTest are end-to-end integration tests, so they may take a long time to run.
+ */
+public class CliTest extends TestRunner {
+
+  @ClassRule
+  public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster();
+
+  private static final String COMMANDS_KSQL_TOPIC_NAME = KsqlRestApplication.COMMANDS_KSQL_TOPIC_NAME;
+  private static final int PORT = 9098;
+  private static final String LOCAL_REST_SERVER_ADDR = "http://localhost:" + PORT;
+  private static final OutputFormat CLI_OUTPUT_FORMAT = OutputFormat.TABULAR;
+
+  private static final long STREAMED_QUERY_ROW_LIMIT = 10000;
+  private static final long STREAMED_QUERY_TIMEOUT_MS = 10000;
+
+  private static final TestResult.OrderedResult EMPTY_RESULT = build("");
+
+  private static LocalCli localCli;
+  private static TestTerminal terminal;
+  private static String commandTopicName;
+  private static TopicProducer topicProducer;
+  private static TopicConsumer topicConsumer;
+
+  private static OrderDataProvider orderDataProvider;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    KsqlRestClient restClient = new KsqlRestClient(LOCAL_REST_SERVER_ADDR);
+
+    // TODO: Fix Properties Setup in Local().getCli()
+    // Local local = new Local().getCli();
+    // LocalCli localCli = local.getCli(restClient, terminal);
+
+    // TODO: add remote cli test cases
+    terminal = new TestTerminal(CLI_OUTPUT_FORMAT, restClient);
+
+    KsqlRestConfig restServerConfig = new KsqlRestConfig(defaultServerProperties());
+    commandTopicName = restServerConfig.getCommandTopic();
+
+    KsqlRestApplication restServer = KsqlRestApplication.buildApplication(restServerConfig, false);
+    restServer.start();
+
+    localCli = new LocalCli(
+        STREAMED_QUERY_ROW_LIMIT,
+        STREAMED_QUERY_TIMEOUT_MS,
+        restClient,
+        terminal,
+        restServer
+    );
+
+    TestRunner.setup(localCli, terminal);
+
+    topicProducer = new TopicProducer(CLUSTER);
+    topicConsumer = new TopicConsumer(CLUSTER);
+
+    // Test list or show commands before any custom topics are created.
+    testListOrShowCommands();
+
+    orderDataProvider = new OrderDataProvider();
+    restServer.getKsqlEngine().getKafkaTopicClient().createTopic(orderDataProvider.topicName(), 1, (short) 1);
+    produceInputStream(orderDataProvider);
+  }
+
+  private static void produceInputStream(TestDataProvider dataProvider) throws Exception {
+    createKStream(dataProvider);
+    topicProducer.produceInputData(dataProvider);
+  }
+
+  private static void createKStream(TestDataProvider dataProvider) {
+    test(
+        String.format("CREATE STREAM %s %s WITH (value_format = 'json', kafka_topic = '%s' , key='%s')",
+            dataProvider.kstreamName(), dataProvider.ksqlSchemaString(), dataProvider.topicName(), dataProvider.key()),
+        build("Stream created")
+    );
+  }
+
+  private static void testListOrShowCommands() {
+    testListOrShow("topics", build(commandTopicName, true, 1, 1));
+    testListOrShow("registered topics", build(COMMANDS_KSQL_TOPIC_NAME, commandTopicName, "JSON"));
+    testListOrShow("streams", EMPTY_RESULT);
+    testListOrShow("tables", EMPTY_RESULT);
+    testListOrShow("queries", EMPTY_RESULT);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    // If a "Connection to node -1 could not be established. Broker may not be available." warning
+    // (NetworkClient:589) shows up here, it is most likely caused by a resource that was not closed.
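+    // The LocalCli below was handed the KsqlRestApplication instance in setUp(), so closing
+    // it here is expected to take the embedded REST server down with it.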
+    // ksqlEngine.close();
+    System.out.println("[Terminal Output]");
+    System.out.println(terminal.getOutputString());
+
+    localCli.close();
+    terminal.close();
+  }
+
+  private static Map<String, Object> genDefaultConfigMap() {
+    Map<String, Object> configMap = new HashMap<>();
+    configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
+    configMap.put(KsqlRestConfig.LISTENERS_CONFIG, CliUtils.getLocalServerAddress(PORT));
+    configMap.put("application.id", "KSQL");
+    configMap.put("commit.interval.ms", 0);
+    configMap.put("cache.max.bytes.buffering", 0);
+    configMap.put("auto.offset.reset", "earliest");
+    configMap.put("ksql.command.topic.suffix", "commands");
+
+    return configMap;
+  }
+
+  private static Properties defaultServerProperties() {
+    Properties serverProperties = new Properties();
+    serverProperties.putAll(genDefaultConfigMap());
+    return serverProperties;
+  }
+
+  private static Map<String, Object> validStartUpConfigs() {
+    // TODO: these configs should be set with other configs on start-up, rather than set up later.
+    Map<String, Object> startConfigs = genDefaultConfigMap();
+    startConfigs.put("num.stream.threads", 4);
+
+    startConfigs.put(SINK_NUMBER_OF_REPLICATIONS, 1);
+    startConfigs.put(SINK_NUMBER_OF_PARTITIONS, 4);
+    startConfigs.put(SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION, 1000000);
+
+    startConfigs.put(KSQL_TRANSIENT_QUERY_NAME_PREFIX_CONFIG, KSQL_TRANSIENT_QUERY_NAME_PREFIX_DEFAULT);
+    startConfigs.put(KSQL_SERVICE_ID_CONFIG, KSQL_SERVICE_ID_DEFAULT);
+    startConfigs.put(KSQL_TABLE_STATESTORE_NAME_SUFFIX_CONFIG, KSQL_TABLE_STATESTORE_NAME_SUFFIX_DEFAULT);
+    startConfigs.put(KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG, KSQL_PERSISTENT_QUERY_NAME_PREFIX_DEFAULT);
+
+    return startConfigs;
+  }
+
+  private static void testCreateStreamAsSelect(String selectQuery, Schema resultSchema, Map<String, GenericRow> expectedResults) throws Exception {
+    if (!selectQuery.endsWith(";")) {
+      selectQuery += ";";
+    }
+    String resultKStreamName = "RESULT";
+    String resultTopicName = resultKStreamName;
+    final String queryString = "CREATE STREAM " + resultKStreamName + " AS " + selectQuery;
+
+    /* Start Stream Query */
+    test(queryString, build("Stream created and running"));
+
+    /* Assert Results */
+    Map<String, GenericRow> results = topicConsumer.readResults(resultTopicName, resultSchema, expectedResults.size(), new StringDeserializer());
+    Assert.assertEquals(expectedResults.size(), results.size());
+    assertExpectedResults(results, expectedResults);
+
+    /* Get first column of the first row in the result set to obtain the queryID */
+    String queryID = (String) ((List) run("list queries").data.toArray()[0]).get(0);
+
+    /* Clean Up */
+    run("terminate query " + queryID);
+    dropStream(resultKStreamName);
+  }
+
+  private static void dropStream(String name) {
+    test(
+        String.format("drop stream %s", name),
+        build("Source " + name + " was dropped")
+    );
+  }
+
+  @Test
+  public void testPropertySetUnset() {
+    test("set 'application.id' = 'Test_App'", EMPTY_RESULT);
+    test("set 'producer.batch.size' = '16384'", EMPTY_RESULT);
+    test("set 'max.request.size' = '1048576'", EMPTY_RESULT);
+    test("set 'consumer.max.poll.records' = '500'", EMPTY_RESULT);
+    test("set 'enable.auto.commit' = 'true'", EMPTY_RESULT);
+    test("set 'AVROSCHEMA' = 'schema'", EMPTY_RESULT);
+
+    test("unset 'application.id'", EMPTY_RESULT);
+    test("unset 'producer.batch.size'", EMPTY_RESULT);
+    test("unset 'max.request.size'", EMPTY_RESULT);
+    test("unset 'consumer.max.poll.records'", EMPTY_RESULT);
+    test("unset 'enable.auto.commit'", EMPTY_RESULT);
+    test("unset 'AVROSCHEMA'", EMPTY_RESULT);
+
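+    // After the unsets, the listed properties should match the start-up configs again;
+    // requireOrder is false because the server may report them in any order.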
+    testListOrShow("properties", build(validStartUpConfigs()), false);
+  }
+
+  @Test
+  public void testDescribe() {
+    test("describe topic " + COMMANDS_KSQL_TOPIC_NAME,
+        build(COMMANDS_KSQL_TOPIC_NAME, commandTopicName, "JSON"));
+  }
+
+  @Test
+  public void testSelectStar() throws Exception {
+    testCreateStreamAsSelect(
+        "SELECT * FROM " + orderDataProvider.kstreamName(),
+        orderDataProvider.schema(),
+        orderDataProvider.data()
+    );
+  }
+
+  @Test
+  public void testSelectProject() throws Exception {
+    Map<String, GenericRow> expectedResults = new HashMap<>();
+    expectedResults.put("1", new GenericRow(Arrays.asList("ITEM_1", 10.0, new Double[]{100.0, 110.99, 90.0})));
+    expectedResults.put("2", new GenericRow(Arrays.asList("ITEM_2", 20.0, new Double[]{10.0, 10.99, 9.0})));
+    expectedResults.put("3", new GenericRow(Arrays.asList("ITEM_3", 30.0, new Double[]{10.0, 10.99, 91.0})));
+    expectedResults.put("4", new GenericRow(Arrays.asList("ITEM_4", 40.0, new Double[]{10.0, 140.99, 94.0})));
+    expectedResults.put("5", new GenericRow(Arrays.asList("ITEM_5", 50.0, new Double[]{160.0, 160.99, 98.0})));
+    expectedResults.put("6", new GenericRow(Arrays.asList("ITEM_6", 60.0, new Double[]{1000.0, 1100.99, 900.0})));
+    expectedResults.put("7", new GenericRow(Arrays.asList("ITEM_7", 70.0, new Double[]{1100.0, 1110.99, 190.0})));
+    expectedResults.put("8", new GenericRow(Arrays.asList("ITEM_8", 80.0, new Double[]{1100.0, 1110.99, 970.0})));
+
+    Schema resultSchema = SchemaBuilder.struct()
+        .field("ITEMID", SchemaBuilder.STRING_SCHEMA)
+        .field("ORDERUNITS", SchemaBuilder.FLOAT64_SCHEMA)
+        .field("PRICEARRAY", SchemaBuilder.array(SchemaBuilder.FLOAT64_SCHEMA))
+        .build();
+
+    testCreateStreamAsSelect(
+        "SELECT ITEMID, ORDERUNITS, PRICEARRAY FROM " + orderDataProvider.kstreamName(),
+        resultSchema,
+        expectedResults
+    );
+  }
+
+  @Test
+  public void testSelectFilter() throws Exception {
+    Map<String, GenericRow> expectedResults = new HashMap<>();
+    Map<String, Double> mapField = new HashMap<>();
+    mapField.put("key1", 1.0);
+    mapField.put("key2", 2.0);
+    mapField.put("key3", 3.0);
+    expectedResults.put("8", new GenericRow(Arrays.asList(8, "ORDER_6", "ITEM_8", 80.0,
+        new Double[]{1100.0, 1110.99, 970.0}, mapField)));
+
+    testCreateStreamAsSelect(
+        "SELECT * FROM " + orderDataProvider.kstreamName() + " WHERE ORDERUNITS > 20 AND ITEMID = 'ITEM_8'",
+        orderDataProvider.schema(),
+        expectedResults
+    );
+  }
+
+  @Test
+  public void testSelectUDFs() throws Exception {
+    final String selectColumns =
+        "ITEMID, ORDERUNITS*10, PRICEARRAY[0]+10, KEYVALUEMAP['key1']*KEYVALUEMAP['key2']+10, PRICEARRAY[1]>1000";
+    final String whereClause = "ORDERUNITS > 20 AND ITEMID LIKE '%_8'";
+
+    final String queryString = String.format(
+        "SELECT %s FROM %s WHERE %s;",
+        selectColumns,
+        orderDataProvider.kstreamName(),
+        whereClause
+    );
+
+    Map<String, GenericRow> expectedResults = new HashMap<>();
+    expectedResults.put("8", new GenericRow(Arrays.asList("ITEM_8", 800.0, 1110.0, 12.0, true)));
+
+    // TODO: this test currently fails, so the assertion is disabled:
+    // testCreateStreamAsSelect(queryString, orderDataProvider.schema(), expectedResults);
+  }
+
+  // ===================================================================
+  // The tests below are only used for coverage, not for results validation.
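+  // They exercise the interactive and non-interactive CLI entry points end to end;
+  // everything they print is captured by TestTerminal and echoed in tearDown().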
+ // =================================================================== + + @Test + public void testRunInteractively() { + localCli.runInteractively(); + } + + @Test + public void testEmptyInput() throws Exception { + localCli.runNonInteractively(""); + } + + @Test + public void testExitCommand() throws Exception { + localCli.runNonInteractively("exit"); + localCli.runNonInteractively("\nexit\n\n\n"); + localCli.runNonInteractively("exit\nexit\nexit"); + localCli.runNonInteractively("\n\nexit\nexit\n\n\n\nexit\n\n\n"); + } + + @Test + public void testExtraCommands() throws Exception { + localCli.runNonInteractively("help"); + localCli.runNonInteractively("version"); + localCli.runNonInteractively("output"); + localCli.runNonInteractively("clear"); + } + +} \ No newline at end of file diff --git a/ksql-cli/src/test/java/io/confluent/ksql/CliTestFailedException.java b/ksql-cli/src/test/java/io/confluent/ksql/CliTestFailedException.java new file mode 100644 index 000000000000..3249b4ae7608 --- /dev/null +++ b/ksql-cli/src/test/java/io/confluent/ksql/CliTestFailedException.java @@ -0,0 +1,13 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql; + +public class CliTestFailedException extends RuntimeException { + + public CliTestFailedException(Throwable cause) { + super(cause); + } + +} diff --git a/ksql-cli/src/test/java/io/confluent/ksql/FakeException.java b/ksql-cli/src/test/java/io/confluent/ksql/FakeException.java new file mode 100644 index 000000000000..fb0b3afc7aa1 --- /dev/null +++ b/ksql-cli/src/test/java/io/confluent/ksql/FakeException.java @@ -0,0 +1,18 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql; + +public class FakeException extends RuntimeException { + + @Override + public String getMessage() { + return "This Exception is only used for verifying exception prints. It doesn't mean anything goes wrong."; + } + + @Override + public StackTraceElement[] getStackTrace() { + return new StackTraceElement[0]; + } +} diff --git a/ksql-cli/src/test/java/io/confluent/ksql/TestLineReader.java b/ksql-cli/src/test/java/io/confluent/ksql/TestLineReader.java new file mode 100644 index 000000000000..587edee07b60 --- /dev/null +++ b/ksql-cli/src/test/java/io/confluent/ksql/TestLineReader.java @@ -0,0 +1,24 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql; + +import io.confluent.ksql.cli.console.LineReader; +import org.jline.reader.History; + +import java.io.IOException; +import java.util.ArrayList; + +public class TestLineReader implements LineReader { + + @Override + public String readLine() throws IOException { + return null; + } + + @Override + public Iterable getHistory() { + return new ArrayList<>(); + } +} diff --git a/ksql-cli/src/test/java/io/confluent/ksql/TestResult.java b/ksql-cli/src/test/java/io/confluent/ksql/TestResult.java new file mode 100644 index 000000000000..a110f63d0cac --- /dev/null +++ b/ksql-cli/src/test/java/io/confluent/ksql/TestResult.java @@ -0,0 +1,122 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.util.StringUtil;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.TreeMap;
+
+public abstract class TestResult {
+
+  private static final String LINE_SEPARATOR = ", ";
+
+  Collection<List<String>> data;
+  private boolean sealed = false;
+
+  static class OrderedResult extends TestResult {
+    private OrderedResult() {
+      data = new ArrayList<>();
+    }
+
+    private OrderedResult(String singleRow) {
+      this();
+      if (singleRow.length() > 0) {
+        data.add(Arrays.asList(singleRow.split(LINE_SEPARATOR)));
+      }
+      seal();
+    }
+
+    @Override
+    public String toString() {
+      return data.toString();
+    }
+  }
+
+  static class UnorderedResult extends TestResult {
+    private UnorderedResult() {
+      data = new HashSet<>();
+    }
+
+    private UnorderedResult(Map<String, Object> map) {
+      this();
+      for (Map.Entry<String, Object> kv : map.entrySet()) {
+        data.add(Arrays.asList(kv.getKey(), String.valueOf(kv.getValue())));
+      }
+      seal();
+    }
+
+    @Override
+    public String toString() {
+      // for convenience, we show content ordered by first column (key) alphabetically
+      TreeMap<String, List<String>> map = new TreeMap<>();
+      for (List<String> entry : data) {
+        map.put(entry.get(0), entry);
+      }
+      return map.values().toString();
+    }
+  }
+
+  static UnorderedResult build(Map<String, Object> map) {
+    return new UnorderedResult(map);
+  }
+
+  static OrderedResult build(String singleRow) {
+    return new OrderedResult(singleRow);
+  }
+
+  static OrderedResult build(Object... cols) {
+    return new OrderedResult(StringUtil.join(", ", Arrays.asList(cols)));
+  }
+
+  static TestResult init(boolean requireOrder) {
+    return requireOrder ? new OrderedResult() : new UnorderedResult();
+  }
+
+  void addRow(GenericRow row) {
+    if (sealed) {
+      throw new RuntimeException("TestResult already sealed, cannot add more rows to it.");
+    }
+
+    List<String> newRow = new ArrayList<>();
+    for (Object column : row.getColumns()) {
+      newRow.add(String.valueOf(column));
+    }
+
+    data.add(newRow);
+  }
+
+  void addRows(List<List<String>> rows) {
+    if (sealed) {
+      throw new RuntimeException("TestResult already sealed, cannot add more rows to it.");
+    }
+
+    data.addAll(rows);
+  }
+
+  void seal() {
+    this.sealed = true;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    TestResult that = (TestResult) o;
+    return Objects.equals(data, that.data);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(data);
+  }
+}
diff --git a/ksql-cli/src/test/java/io/confluent/ksql/TestRunner.java b/ksql-cli/src/test/java/io/confluent/ksql/TestRunner.java
new file mode 100644
index 000000000000..c55c3c0b7670
--- /dev/null
+++ b/ksql-cli/src/test/java/io/confluent/ksql/TestRunner.java
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql; + +import io.confluent.ksql.cli.LocalCli; +import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Objects; + +public abstract class TestRunner { + + private static final Logger LOGGER = LoggerFactory.getLogger(TestRunner.class); + + private static LocalCli localCli; + private static TestTerminal testTerminal; + + public static void setup(LocalCli localCli, TestTerminal testTerminal) { + Objects.requireNonNull(localCli); + Objects.requireNonNull(testTerminal); + TestRunner.localCli = localCli; + TestRunner.testTerminal = testTerminal; + } + + protected static void testListOrShow(String commandSuffix, TestResult.OrderedResult expectedResult) { + testListOrShow(commandSuffix, expectedResult, true); + } + + protected static void testListOrShow(String commandSuffix, TestResult expectedResult, boolean requireOrder) { + test("list " + commandSuffix, expectedResult, requireOrder); + test("show " + commandSuffix, expectedResult, requireOrder); + } + + protected static void test(String command, TestResult.OrderedResult expectedResult) { + test(command, expectedResult, true); + } + + protected static void test(String command, TestResult expectedResult, boolean requireOrder) { + TestResult actual = run(command, requireOrder); + Assert.assertEquals(expectedResult, actual); + } + + protected static TestResult run(String command, boolean requireOrder) throws CliTestFailedException { + try { + if (!command.endsWith(";")) { + command += ";"; + } + System.out.println("[Run Command] " + command); + testTerminal.resetTestResult(requireOrder); + localCli.handleLine(command); + return testTerminal.getTestResult(); + } catch (Exception e) { + throw new CliTestFailedException(e); + } + } + + protected static TestResult run(String command) throws CliTestFailedException { + return run(command, false); + } + +} \ No newline at end of file diff --git a/ksql-cli/src/test/java/io/confluent/ksql/TestTerminal.java b/ksql-cli/src/test/java/io/confluent/ksql/TestTerminal.java new file mode 100644 index 000000000000..22cb77f04639 --- /dev/null +++ b/ksql-cli/src/test/java/io/confluent/ksql/TestTerminal.java @@ -0,0 +1,92 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql;
+
+import io.confluent.ksql.cli.console.Console;
+import io.confluent.ksql.cli.console.OutputFormat;
+import io.confluent.ksql.physical.GenericRow;
+
+import io.confluent.ksql.rest.client.KsqlRestClient;
+import org.jline.terminal.Terminal;
+import org.jline.utils.InfoCmp;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.List;
+
+public class TestTerminal extends Console {
+
+  private final PrintWriter printWriter;
+  private final StringWriter writer;
+  private TestResult output;
+
+  public TestTerminal(OutputFormat outputFormat, KsqlRestClient restClient) {
+    super(outputFormat, restClient);
+
+    this.writer = new StringWriter();
+    this.printWriter = new PrintWriter(writer);
+
+    resetTestResult(true);
+  }
+
+  public void resetTestResult(boolean requireOrder) {
+    output = TestResult.init(requireOrder);
+  }
+
+  public TestResult getTestResult() {
+    return output;
+  }
+
+  public String getOutputString() {
+    return writer.toString();
+  }
+
+  @Override
+  public void addResult(GenericRow row) {
+    output.addRow(row);
+  }
+
+  @Override
+  public void addResult(List<String> columnHeaders, List<List<String>> rows) {
+    output.addRows(rows);
+  }
+
+  @Override
+  public PrintWriter writer() {
+    return printWriter;
+  }
+
+  @Override
+  public int getWidth() {
+    return 100;
+  }
+
+  @Override
+  public void flush() {
+    printWriter.flush();
+  }
+
+  @Override
+  public void close() throws IOException {
+    printWriter.close();
+  }
+
+  @Override
+  protected TestLineReader buildLineReader() {
+    return new TestLineReader();
+  }
+
+  @Override
+  protected void puts(InfoCmp.Capability capability) {
+    // Ignore
+  }
+
+  @Override
+  public Terminal.SignalHandler handle(Terminal.Signal signal, Terminal.SignalHandler signalHandler) {
+    // Ignore
+    return null;
+  }
+}
diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java
new file mode 100644
index 000000000000..d7c30aca8bcb
--- /dev/null
+++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java
@@ -0,0 +1,123 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.cli.console;
+
+import io.confluent.ksql.FakeException;
+import io.confluent.ksql.TestTerminal;
+import io.confluent.ksql.metastore.DataSource;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.rest.client.KsqlRestClient;
+import io.confluent.ksql.rest.entity.CommandStatusEntity;
+import io.confluent.ksql.rest.entity.ErrorMessageEntity;
+import io.confluent.ksql.rest.entity.ExecutionPlan;
+import io.confluent.ksql.rest.entity.KafkaTopicInfo;
+import io.confluent.ksql.rest.entity.KafkaTopicsList;
+import io.confluent.ksql.rest.entity.KsqlEntityList;
+import io.confluent.ksql.rest.entity.KsqlTopicInfo;
+import io.confluent.ksql.rest.entity.KsqlTopicsList;
+import io.confluent.ksql.rest.entity.PropertiesList;
+import io.confluent.ksql.rest.entity.Queries;
+import io.confluent.ksql.rest.entity.SourceDescription;
+import io.confluent.ksql.rest.entity.StreamedRow;
+import io.confluent.ksql.rest.entity.StreamsList;
+import io.confluent.ksql.rest.entity.TablesList;
+import io.confluent.ksql.rest.entity.TopicDescription;
+import io.confluent.ksql.util.SchemaUtil;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.junit.After;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+@RunWith(Parameterized.class)
+public class ConsoleTest {
+
+  private TestTerminal terminal;
+  private KsqlRestClient client;
+
+  @Parameterized.Parameters(name = "{0}")
+  public static Collection<String> data() {
+    return Arrays.asList("JSON", "TABULAR");
+  }
+
+  public ConsoleTest(String outputFormat) {
+    client = new KsqlRestClient("http://localhost:59098");
+    terminal = new TestTerminal(OutputFormat.valueOf(outputFormat), client);
+    terminal.setOutputFormat(outputFormat);
+  }
+
+  @After
+  public void after() throws Exception {
+    client.close();
+    terminal.close();
+  }
+
+  @Test
+  public void testPrintGenericStreamedRow() throws IOException {
+    StreamedRow row = new StreamedRow(new GenericRow(Arrays.asList("col_1", "col_2")));
+    terminal.printStreamedRow(row);
+  }
+
+  @Test
+  public void testPrintErrorStreamedRow() throws IOException {
+    StreamedRow row = new StreamedRow(new FakeException());
+    terminal.printStreamedRow(row);
+  }
+
+  @Test
+  public void testPrintKSqlEntityList() throws IOException {
+    Map<String, Object> properties = new HashMap<>();
+    properties.put("k1", 1);
+    properties.put("k2", "v2");
+    properties.put("k3", true);
+
+    List<Queries.RunningQuery> queries = new ArrayList<>();
+    queries.add(new Queries.RunningQuery("select * from t1", "TestTopic", 1));
+
+    for (int i = 0; i < 5; i++) {
+      KsqlEntityList entityList = new KsqlEntityList(Arrays.asList(
+          new CommandStatusEntity("e", "topic/1", "SUCCESS", "Success Message"),
+          new ErrorMessageEntity("e", new FakeException()),
+          new PropertiesList("e", properties),
+          new Queries("e", queries),
+          new SourceDescription("e", "TestSource", buildTestSchema(i), DataSource.DataSourceType.KTABLE, "key", "2000-01-01"),
+          new TopicDescription("e", "TestTopic", "TestKafkaTopic", "AVRO", "schemaString"),
+          new StreamsList("e", Arrays.asList(new StreamsList.StreamInfo("TestStream", "TestTopic", "AVRO"))),
+          new TablesList("e", Arrays.asList(new TablesList.TableInfo("TestTable",
"TestTopic", "JSON", false))), + new KsqlTopicsList("e", Arrays.asList(new KsqlTopicInfo("TestTopic", "TestKafkaTopic", DataSource.DataSourceSerDe.JSON))), + new KafkaTopicsList("e", Arrays.asList(new KafkaTopicInfo("TestKafkaTopic", "true", "1", "1"))), + new ExecutionPlan("Test Execution Plan") + )); + terminal.printKsqlEntityList(entityList); + } + } + + private List buildTestSchema(int size) { + SchemaBuilder dataSourceBuilder = SchemaBuilder.struct().name("TestSchema"); + for (int i = 0; i < size; i++) { + dataSourceBuilder.field("f_" + i, SchemaUtil.getTypeSchema("STRING")); + } + + List res = new ArrayList<>(); + List fields = dataSourceBuilder.build().fields(); + for (Field field : fields) { + res.add(new SourceDescription.FieldSchemaInfo(field.name(), SchemaUtil.getSchemaFieldName(field))); + } + + return res; + } + +} \ No newline at end of file diff --git a/ksql-core/pom.xml b/ksql-core/pom.xml new file mode 100644 index 000000000000..6cde5cfa14aa --- /dev/null +++ b/ksql-core/pom.xml @@ -0,0 +1,188 @@ + + + 4.0.0 + + + io.confluent.ksql + ksql-parent + 0.1-SNAPSHOT + + + ksql-core + + + + com.google.code.findbugs + jsr305 + + + + com.google.guava + guava + + + + io.airlift + slice + + + + junit + junit + + + + org.antlr + antlr4-runtime + + + + org.apache.avro + avro + + + + org.apache.commons + commons-csv + + + + org.apache.kafka + kafka_${kafka.scala.version} + + + + org.apache.kafka + connect-api + + + + org.apache.kafka + connect-json + + + + org.apache.kafka + kafka-clients + + + + org.apache.kafka + kafka-streams + + + + org.codehaus.janino + janino + + + + + org.apache.curator + curator-test + 2.9.0 + test + + + + org.apache.kafka + kafka_${kafka.scala.version} + test + test + + + + org.apache.kafka + kafka-clients + test + test + + + + + + + org.antlr + antlr4-maven-plugin + ${antlr.version} + + + + true + + + + + antlr4 + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + 1 + false + + + + + net.alchim31.maven + scala-maven-plugin + 3.2.1 + + + + -Xexperimental + + + + + + compile + testCompile + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + + + + src/main/resources + true + + ksql-version.streamsProperties + + + + + src/main/resources + false + + ksql-version.streamsProperties + + + + + diff --git a/ksql-core/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 b/ksql-core/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 new file mode 100644 index 000000000000..7020964b308f --- /dev/null +++ b/ksql-core/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 @@ -0,0 +1,745 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * This file is an adaptation of Presto's presto-parser/src/main/antlr4/com/facebook/presto/sql/parser/SqlBase.g4 grammar. 
+ */ + +grammar SqlBase; + +tokens { + DELIMITER +} + +statements + : (singleStatement)* EOF + ; + +singleStatement + : statement ';' + ; + +singleExpression + : expression EOF + ; + +statement + : query #querystatement + | (LIST | SHOW) PROPERTIES #listProperties + | (LIST | SHOW) TOPICS #listTopics + | (LIST | SHOW) REGISTERED TOPICS #listRegisteredTopics + | (LIST | SHOW) STREAMS #listStreams + | (LIST | SHOW) TABLES #listTables + | DESCRIBE (qualifiedName | TOPIC qualifiedName) #showColumns + | PRINT qualifiedName (FROM BEGINNING)? ((INTERVAL | SAMPLE) number)? #printTopic + | (LIST | SHOW) QUERIES #listQueries + | TERMINATE QUERY? INTEGER_VALUE #terminateQuery + | SET STRING EQ STRING #setProperty + | UNSET STRING #unsetProperty + | LOAD expression #loadProperties + | REGISTER TOPIC (IF NOT EXISTS)? qualifiedName + (WITH tableProperties)? #registerTopic + | CREATE STREAM (IF NOT EXISTS)? qualifiedName + '(' tableElement (',' tableElement)* ')' + (WITH tableProperties)? #createStream + | CREATE STREAM (IF NOT EXISTS)? qualifiedName + (WITH tableProperties)? AS query + (PARTITION BY identifier)? #createStreamAs + | CREATE TABLE (IF NOT EXISTS)? qualifiedName + '(' tableElement (',' tableElement)* ')' + (WITH tableProperties)? #createTable + | CREATE TABLE (IF NOT EXISTS)? qualifiedName + (WITH tableProperties)? AS query #createTableAs + | DROP TOPIC (IF EXISTS)? qualifiedName #dropTopic + | DROP STREAM (IF EXISTS)? qualifiedName #dropStream + | DROP TABLE (IF EXISTS)? qualifiedName #dropTable + | EXPLAIN ANALYZE? + ('(' explainOption (',' explainOption)* ')')? statement #explain + | EXPORT CATALOG TO STRING #exportCatalog + | RUN SCRIPT STRING #runScript + ; + +query + : with? queryNoWith + ; + +with + : WITH RECURSIVE? namedQuery (',' namedQuery)* + ; + +tableElement + : identifier type + ; + +tableProperties + : '(' tableProperty (',' tableProperty)* ')' + ; + +tableProperty + : identifier EQ expression + ; + +queryNoWith: + queryTerm + (ORDER BY sortItem (',' sortItem)*)? + (LIMIT limit=(INTEGER_VALUE | ALL))? + (APPROXIMATE AT confidence=number CONFIDENCE)? + ; + +queryTerm + : queryPrimary #queryTermDefault + | left=queryTerm operator=INTERSECT setQuantifier? right=queryTerm #setOperation + | left=queryTerm operator=(UNION | EXCEPT) setQuantifier? right=queryTerm #setOperation + ; + +queryPrimary + : querySpecification #queryPrimaryDefault + | TABLE qualifiedName #table + | VALUES expression (',' expression)* #inlineTable + | '(' queryNoWith ')' #subquery + ; + +sortItem + : expression ordering=(ASC | DESC)? (NULLS nullOrdering=(FIRST | LAST))? + ; + +querySpecification + : SELECT STREAM? setQuantifier? selectItem (',' selectItem)* + (INTO into=relationPrimary)? + (FROM from=relation (',' relation)*)? + (WINDOW windowExpression)? + (WHERE where=booleanExpression)? + (GROUP BY groupBy)? + (HAVING having=booleanExpression)? + ; + +windowExpression + : (IDENTIFIER)? + ( tumblingWindowExpression | hoppingWindowExpression | sessionWindowExpression) + ; + +tumblingWindowExpression + : TUMBLING '(' SIZE number windowUnit')' + ; + +hoppingWindowExpression + : HOPPING '(' SIZE number windowUnit ',' ADVANCE BY number windowUnit ')' + ; + +sessionWindowExpression + : SESSION '(' number windowUnit ')' + ; + +windowUnit + : DAY + | HOUR + | MINUTE + | SECOND + | MILLISECOND + | DAYS + | HOURS + | MINUTES + | SECONDS + | MILLISECONDS + ; + +groupBy + : setQuantifier? 
groupingElement (',' groupingElement)* + ; + +groupingElement + : groupingExpressions #singleGroupingSet + | ROLLUP '(' (qualifiedName (',' qualifiedName)*)? ')' #rollup + | CUBE '(' (qualifiedName (',' qualifiedName)*)? ')' #cube + | GROUPING SETS '(' groupingSet (',' groupingSet)* ')' #multipleGroupingSets + ; + +groupingExpressions + : '(' (expression (',' expression)*)? ')' + | expression + ; + +groupingSet + : '(' (qualifiedName (',' qualifiedName)*)? ')' + | qualifiedName + ; + +namedQuery + : name=identifier (columnAliases)? AS '(' query ')' + ; + +setQuantifier + : DISTINCT + | ALL + ; + +selectItem + : expression (AS? identifier)? #selectSingle + | qualifiedName '.' ASTERISK #selectAll + | ASTERISK #selectAll + ; + + +relation + : left=relation + ( CROSS JOIN right=aliasedRelation + | joinType JOIN rightRelation=relation joinCriteria + | NATURAL joinType JOIN right=aliasedRelation + ) #joinRelation + | aliasedRelation #relationDefault + ; + +joinType + : INNER? + | LEFT OUTER? + | RIGHT OUTER? + | FULL OUTER? + ; + +joinCriteria + : ON booleanExpression + | USING '(' identifier (',' identifier)* ')' + ; + + +sampleType + : BERNOULLI + | SYSTEM + | POISSONIZED + ; + +aliasedRelation + : relationPrimary (AS? identifier columnAliases?)? + ; + +columnAliases + : '(' identifier (',' identifier)* ')' + ; + +relationPrimary + : + qualifiedName (WITH tableProperties)? #tableName + | '(' query ')' #subqueryRelation + | UNNEST '(' expression (',' expression)* ')' (WITH ORDINALITY)? #unnest + | '(' relation ')' #parenthesizedRelation + ; + +expression + : booleanExpression + ; + +booleanExpression + : predicated #booleanDefault + | NOT booleanExpression #logicalNot + | left=booleanExpression operator=AND right=booleanExpression #logicalBinary + | left=booleanExpression operator=OR right=booleanExpression #logicalBinary + ; + +// workaround for: +// https://github.com/antlr/antlr4/issues/780 +// https://github.com/antlr/antlr4/issues/781 +predicated + : valueExpression predicate[$valueExpression.ctx]? + ; + +predicate[ParserRuleContext value] + : comparisonOperator right=valueExpression #comparison + | NOT? BETWEEN lower=valueExpression AND upper=valueExpression #between + | NOT? IN '(' expression (',' expression)* ')' #inList + | NOT? IN '(' query ')' #inSubquery + | NOT? LIKE pattern=valueExpression (ESCAPE escape=valueExpression)? #like + | IS NOT? NULL #nullPredicate + | IS NOT? DISTINCT FROM right=valueExpression #distinctFrom + ; + +valueExpression + : primaryExpression #valueExpressionDefault + | valueExpression AT timeZoneSpecifier #atTimeZone + | operator=(MINUS | PLUS) valueExpression #arithmeticUnary + | left=valueExpression operator=(ASTERISK | SLASH | PERCENT) right=valueExpression #arithmeticBinary + | left=valueExpression operator=(PLUS | MINUS) right=valueExpression #arithmeticBinary + | left=valueExpression CONCAT right=valueExpression #concatenation + ; + +primaryExpression + : NULL #nullLiteral + | interval #intervalLiteral + | identifier STRING #typeConstructor + | number #numericLiteral + | booleanValue #booleanLiteral + | STRING #stringLiteral + | BINARY_LITERAL #binaryLiteral + | POSITION '(' valueExpression IN valueExpression ')' #position + | '(' expression (',' expression)+ ')' #rowConstructor + | ROW '(' expression (',' expression)* ')' #rowConstructor + | qualifiedName '(' ASTERISK ')' over? #functionCall + | qualifiedName '(' (setQuantifier? expression (',' expression)*)? ')' over? 
#functionCall
+    | identifier '->' expression                                                        #lambda
+    | '(' identifier (',' identifier)* ')' '->' expression                              #lambda
+    | '(' query ')'                                                                     #subqueryExpression
+    // This is an extension to ANSI SQL, which considers EXISTS to be a <subquery>
+    | EXISTS '(' query ')'                                                              #exists
+    | CASE valueExpression whenClause+ (ELSE elseExpression=expression)? END            #simpleCase
+    | CASE whenClause+ (ELSE elseExpression=expression)? END                            #searchedCase
+    | CAST '(' expression AS type ')'                                                   #cast
+    | TRY_CAST '(' expression AS type ')'                                               #cast
+    | ARRAY '[' (expression (',' expression)*)? ']'                                     #arrayConstructor
+    | value=primaryExpression '[' index=valueExpression ']'                             #subscript
+    | identifier                                                                        #columnReference
+    | base=primaryExpression '.' fieldName=identifier                                   #dereference
+    | name=CURRENT_DATE                                                                 #specialDateTimeFunction
+    | name=CURRENT_TIME ('(' precision=INTEGER_VALUE ')')?                              #specialDateTimeFunction
+    | name=CURRENT_TIMESTAMP ('(' precision=INTEGER_VALUE ')')?                         #specialDateTimeFunction
+    | name=LOCALTIME ('(' precision=INTEGER_VALUE ')')?                                 #specialDateTimeFunction
+    | name=LOCALTIMESTAMP ('(' precision=INTEGER_VALUE ')')?                            #specialDateTimeFunction
+    | SUBSTRING '(' valueExpression FROM valueExpression (FOR valueExpression)? ')'     #substring
+    | NORMALIZE '(' valueExpression (',' normalForm)? ')'                               #normalize
+    | EXTRACT '(' identifier FROM valueExpression ')'                                   #extract
+    | '(' expression ')'                                                                #parenthesizedExpression
+    ;
+
+timeZoneSpecifier
+    : TIME ZONE interval  #timeZoneInterval
+    | TIME ZONE STRING    #timeZoneString
+    ;
+
+comparisonOperator
+    : EQ | NEQ | LT | LTE | GT | GTE
+    ;
+
+booleanValue
+    : TRUE | FALSE
+    ;
+
+interval
+    : INTERVAL sign=(PLUS | MINUS)? STRING from=intervalField (TO to=intervalField)?
+    ;
+
+intervalField
+    : YEAR | MONTH | DAY | HOUR | MINUTE | SECOND
+    ;
+
+type
+    : type ARRAY
+    | ARRAY '<' type '>'
+    | MAP '<' type ',' type '>'
+    | ROW '(' identifier type (',' identifier type)* ')'
+    | baseType ('(' typeParameter (',' typeParameter)* ')')?
+    ;
+
+typeParameter
+    : INTEGER_VALUE | type
+    ;
+
+baseType
+    : TIME_WITH_TIME_ZONE
+    | TIMESTAMP_WITH_TIME_ZONE
+    | identifier
+    ;
+
+whenClause
+    : WHEN condition=expression THEN result=expression
+    ;
+
+over
+    : OVER '('
+        (PARTITION BY partition+=expression (',' partition+=expression)*)?
+        (ORDER BY sortItem (',' sortItem)*)?
+        windowFrame?
+      ')'
+    ;
+
+windowFrame
+    : frameType=RANGE start=frameBound
+    | frameType=ROWS start=frameBound
+    | frameType=RANGE BETWEEN start=frameBound AND end=frameBound
+    | frameType=ROWS BETWEEN start=frameBound AND end=frameBound
+    ;
+
+frameBound
+    : UNBOUNDED boundType=PRECEDING                 #unboundedFrame
+    | UNBOUNDED boundType=FOLLOWING                 #unboundedFrame
+    | CURRENT ROW                                   #currentRowBound
+    | expression boundType=(PRECEDING | FOLLOWING)  #boundedFrame // expression should be unsignedLiteral
+    ;
+
+
+explainOption
+    : FORMAT value=(TEXT | GRAPHVIZ)         #explainFormat
+    | TYPE value=(LOGICAL | DISTRIBUTED)     #explainType
+    ;
+
+transactionMode
+    : ISOLATION LEVEL levelOfIsolation    #isolationLevel
+    | READ accessMode=(ONLY | WRITE)      #transactionAccessMode
+    ;
+
+levelOfIsolation
+    : READ UNCOMMITTED                    #readUncommitted
+    | READ COMMITTED                      #readCommitted
+    | REPEATABLE READ                     #repeatableRead
+    | SERIALIZABLE                        #serializable
+    ;
+
+callArgument
+    : expression                    #positionalArgument
+    | identifier '=>' expression    #namedArgument
+    ;
+
+privilege
+    : SELECT | DELETE | INSERT | identifier
+    ;
+
+qualifiedName
+    : identifier ('.'
identifier)* + ; + +identifier + : IDENTIFIER #unquotedIdentifier + | quotedIdentifier #quotedIdentifierAlternative + | nonReserved #unquotedIdentifier + | BACKQUOTED_IDENTIFIER #backQuotedIdentifier + | DIGIT_IDENTIFIER #digitIdentifier + ; + +quotedIdentifier + : QUOTED_IDENTIFIER + ; + +number + : DECIMAL_VALUE #decimalLiteral + | INTEGER_VALUE #integerLiteral + ; + +nonReserved + : SHOW | TABLES | COLUMNS | COLUMN | PARTITIONS | FUNCTIONS | SCHEMAS | CATALOGS | SESSION + | ADD + | OVER | PARTITION | RANGE | ROWS | PRECEDING | FOLLOWING | CURRENT | ROW | MAP | ARRAY + | TINYINT | SMALLINT | INTEGER | DATE | TIME | TIMESTAMP | INTERVAL | ZONE + | YEAR | MONTH | DAY | HOUR | MINUTE | SECOND + | EXPLAIN | ANALYZE | FORMAT | TYPE | TEXT | GRAPHVIZ | LOGICAL | DISTRIBUTED + | TABLESAMPLE | SYSTEM | BERNOULLI | POISSONIZED | USE | TO + | RESCALED | APPROXIMATE | AT | CONFIDENCE + | SET | RESET + | VIEW | REPLACE + | IF | NULLIF | COALESCE + | TRY + | normalForm + | POSITION + | NO | DATA + | START | TRANSACTION | COMMIT | ROLLBACK | WORK | ISOLATION | LEVEL + | SERIALIZABLE | REPEATABLE | COMMITTED | UNCOMMITTED | READ | WRITE | ONLY + | CALL + | GRANT | REVOKE | PRIVILEGES | PUBLIC | OPTION + | SUBSTRING + ; + +normalForm + : NFD | NFC | NFKD | NFKC + ; + +SELECT: 'SELECT'; +FROM: 'FROM'; +ADD: 'ADD'; +AS: 'AS'; +ALL: 'ALL'; +SOME: 'SOME'; +ANY: 'ANY'; +DISTINCT: 'DISTINCT'; +WHERE: 'WHERE'; +WINDOW: 'WINDOW'; +GROUP: 'GROUP'; +BY: 'BY'; +GROUPING: 'GROUPING'; +SETS: 'SETS'; +CUBE: 'CUBE'; +ROLLUP: 'ROLLUP'; +ORDER: 'ORDER'; +HAVING: 'HAVING'; +LIMIT: 'LIMIT'; +APPROXIMATE: 'APPROXIMATE'; +AT: 'AT'; +CONFIDENCE: 'CONFIDENCE'; +OR: 'OR'; +AND: 'AND'; +IN: 'IN'; +NOT: 'NOT'; +NO: 'NO'; +EXISTS: 'EXISTS'; +BETWEEN: 'BETWEEN'; +LIKE: 'LIKE'; +IS: 'IS'; +NULL: 'NULL'; +TRUE: 'TRUE'; +FALSE: 'FALSE'; +NULLS: 'NULLS'; +FIRST: 'FIRST'; +LAST: 'LAST'; +ESCAPE: 'ESCAPE'; +ASC: 'ASC'; +DESC: 'DESC'; +SUBSTRING: 'SUBSTRING'; +POSITION: 'POSITION'; +FOR: 'FOR'; +TINYINT: 'TINYINT'; +SMALLINT: 'SMALLINT'; +INTEGER: 'INTEGER'; +DATE: 'DATE'; +TIME: 'TIME'; +TIMESTAMP: 'TIMESTAMP'; +INTERVAL: 'INTERVAL'; +YEAR: 'YEAR'; +MONTH: 'MONTH'; +DAY: 'DAY'; +HOUR: 'HOUR'; +MINUTE: 'MINUTE'; +SECOND: 'SECOND'; +MILLISECOND: 'MILLISECOND'; +YEARS: 'YEARS'; +MONTHS: 'MONTHS'; +DAYS: 'DAYS'; +HOURS: 'HOURS'; +MINUTES: 'MINUTES'; +SECONDS: 'SECONDS'; +MILLISECONDS: 'MILLISECONDS'; +ZONE: 'ZONE'; +CURRENT_DATE: 'CURRENT_DATE'; +CURRENT_TIME: 'CURRENT_TIME'; +CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'; +LOCALTIME: 'LOCALTIME'; +LOCALTIMESTAMP: 'LOCALTIMESTAMP'; +EXTRACT: 'EXTRACT'; +TUMBLING: 'TUMBLING'; +HOPPING: 'HOPPING'; +SIZE: 'SIZE'; +ADVANCE: 'ADVANCE'; +CASE: 'CASE'; +WHEN: 'WHEN'; +THEN: 'THEN'; +ELSE: 'ELSE'; +END: 'END'; +JOIN: 'JOIN'; +CROSS: 'CROSS'; +OUTER: 'OUTER'; +INNER: 'INNER'; +LEFT: 'LEFT'; +RIGHT: 'RIGHT'; +FULL: 'FULL'; +NATURAL: 'NATURAL'; +USING: 'USING'; +ON: 'ON'; +OVER: 'OVER'; +PARTITION: 'PARTITION'; +RANGE: 'RANGE'; +ROWS: 'ROWS'; +UNBOUNDED: 'UNBOUNDED'; +PRECEDING: 'PRECEDING'; +FOLLOWING: 'FOLLOWING'; +CURRENT: 'CURRENT'; +ROW: 'ROW'; +WITH: 'WITH'; +RECURSIVE: 'RECURSIVE'; +VALUES: 'VALUES'; +CREATE: 'CREATE'; +REGISTER: 'REGISTER'; +TABLE: 'TABLE'; +TOPIC: 'TOPIC'; +STREAM: 'STREAM'; +STREAMS: 'STREAMS'; +VIEW: 'VIEW'; +REPLACE: 'REPLACE'; +INSERT: 'INSERT'; +DELETE: 'DELETE'; +INTO: 'INTO'; +CONSTRAINT: 'CONSTRAINT'; +DESCRIBE: 'DESCRIBE'; +PRINT: 'PRINT'; +GRANT: 'GRANT'; +REVOKE: 'REVOKE'; +PRIVILEGES: 'PRIVILEGES'; +PUBLIC: 'PUBLIC'; +OPTION: 'OPTION'; +EXPLAIN: 'EXPLAIN'; +ANALYZE: 
'ANALYZE'; +FORMAT: 'FORMAT'; +TYPE: 'TYPE'; +TEXT: 'TEXT'; +GRAPHVIZ: 'GRAPHVIZ'; +LOGICAL: 'LOGICAL'; +DISTRIBUTED: 'DISTRIBUTED'; +TRY: 'TRY'; +CAST: 'CAST'; +TRY_CAST: 'TRY_CAST'; +SHOW: 'SHOW'; +LIST: 'LIST'; +TABLES: 'TABLES'; +TOPICS: 'TOPICS'; +REGISTERED: 'REGISTERED'; +QUERY: 'QUERY'; +QUERIES: 'QUERIES'; +TERMINATE: 'TERMINATE'; +LOAD: 'LOAD'; +SCHEMAS: 'SCHEMAS'; +CATALOGS: 'CATALOGS'; +COLUMNS: 'COLUMNS'; +COLUMN: 'COLUMN'; +USE: 'USE'; +PARTITIONS: 'PARTITIONS'; +FUNCTIONS: 'FUNCTIONS'; +DROP: 'DROP'; +UNION: 'UNION'; +EXCEPT: 'EXCEPT'; +INTERSECT: 'INTERSECT'; +TO: 'TO'; +SYSTEM: 'SYSTEM'; +BERNOULLI: 'BERNOULLI'; +POISSONIZED: 'POISSONIZED'; +TABLESAMPLE: 'TABLESAMPLE'; +RESCALED: 'RESCALED'; +STRATIFY: 'STRATIFY'; +ALTER: 'ALTER'; +RENAME: 'RENAME'; +UNNEST: 'UNNEST'; +ORDINALITY: 'ORDINALITY'; +ARRAY: 'ARRAY'; +MAP: 'MAP'; +SET: 'SET'; +RESET: 'RESET'; +SESSION: 'SESSION'; +DATA: 'DATA'; +START: 'START'; +TRANSACTION: 'TRANSACTION'; +COMMIT: 'COMMIT'; +ROLLBACK: 'ROLLBACK'; +WORK: 'WORK'; +ISOLATION: 'ISOLATION'; +LEVEL: 'LEVEL'; +SERIALIZABLE: 'SERIALIZABLE'; +REPEATABLE: 'REPEATABLE'; +COMMITTED: 'COMMITTED'; +UNCOMMITTED: 'UNCOMMITTED'; +READ: 'READ'; +WRITE: 'WRITE'; +ONLY: 'ONLY'; +CALL: 'CALL'; +PREPARE: 'PREPARE'; +DEALLOCATE: 'DEALLOCATE'; +EXECUTE: 'EXECUTE'; +SAMPLE: 'SAMPLE'; +EXPORT: 'EXPORT'; +CATALOG: 'CATALOG'; +PROPERTIES: 'PROPERTIES'; +BEGINNING: 'BEGINNING'; +UNSET: 'UNSET'; +RUN: 'RUN'; +SCRIPT: 'SCRIPT'; + +NORMALIZE: 'NORMALIZE'; +NFD : 'NFD'; +NFC : 'NFC'; +NFKD : 'NFKD'; +NFKC : 'NFKC'; + +IF: 'IF'; +NULLIF: 'NULLIF'; +COALESCE: 'COALESCE'; + +EQ : '='; +NEQ : '<>' | '!='; +LT : '<'; +LTE : '<='; +GT : '>'; +GTE : '>='; + +PLUS: '+'; +MINUS: '-'; +ASTERISK: '*'; +SLASH: '/'; +PERCENT: '%'; +CONCAT: '||'; + +STRING + : '\'' ( ~'\'' | '\'\'' )* '\'' + ; + +// Note: we allow any character inside the binary literal and validate +// its a correct literal when the AST is being constructed. This +// allows us to provide more meaningful error messages to the user +BINARY_LITERAL + : 'X\'' (~'\'')* '\'' + ; + +INTEGER_VALUE + : DIGIT+ + ; + +DECIMAL_VALUE + : DIGIT+ '.' DIGIT* + | '.' DIGIT+ + | DIGIT+ ('.' DIGIT*)? EXPONENT + | '.' DIGIT+ EXPONENT + ; + +IDENTIFIER + : (LETTER | '_') (LETTER | DIGIT | '_' | '@' | ':')* + ; + +DIGIT_IDENTIFIER + : DIGIT (LETTER | DIGIT | '_' | '@' | ':')+ + ; + +QUOTED_IDENTIFIER + : '"' ( ~'"' | '""' )* '"' + ; + +BACKQUOTED_IDENTIFIER + : '`' ( ~'`' | '``' )* '`' + ; + +TIME_WITH_TIME_ZONE + : 'TIME' WS 'WITH' WS 'TIME' WS 'ZONE' + ; + +TIMESTAMP_WITH_TIME_ZONE + : 'TIMESTAMP' WS 'WITH' WS 'TIME' WS 'ZONE' + ; + +fragment EXPONENT + : 'E' [+-]? DIGIT+ + ; + +fragment DIGIT + : [0-9] + ; + +fragment LETTER + : [A-Z] + ; + +SIMPLE_COMMENT + : '--' ~[\r\n]* '\r'? '\n'? -> channel(HIDDEN) + ; + +BRACKETED_COMMENT + : '/*' .*? '*/' -> channel(HIDDEN) + ; + +WS + : [ \r\n\t]+ -> channel(HIDDEN) + ; + +// Catch-all for anything we can't recognize. +// We use this to be able to ignore and recover all the text +// when splitting statements with DelimiterLexer +UNRECOGNIZED + : . + ; diff --git a/ksql-core/src/main/java/io/confluent/ksql/KsqlContext.java b/ksql-core/src/main/java/io/confluent/ksql/KsqlContext.java new file mode 100644 index 000000000000..3ebde21c75c5 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/KsqlContext.java @@ -0,0 +1,96 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql;
+
+import io.confluent.ksql.util.KafkaTopicClientImpl;
+import io.confluent.ksql.util.KsqlConfig;
+import org.apache.kafka.streams.StreamsConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import io.confluent.ksql.util.KsqlException;
+import io.confluent.ksql.util.PersistentQueryMetadata;
+import io.confluent.ksql.util.QueryMetadata;
+
+public class KsqlContext {
+
+  private static final Logger log = LoggerFactory.getLogger(KsqlContext.class);
+  final KsqlEngine ksqlEngine;
+  private static final String APPLICATION_ID_OPTION_DEFAULT = "ksql_standalone_cli";
+  private static final String KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT = "localhost:9092";
+
+  public KsqlContext() {
+    this(null);
+  }
+
+  /**
+   * Create a KSQL context object with the given properties.
+   * A KSQL context has its own metastore, valid for the lifetime of the object.
+   *
+   * @param streamsProperties the Kafka Streams properties for queries run in this context
+   */
+  public KsqlContext(Map<String, Object> streamsProperties) {
+    if (streamsProperties == null) {
+      streamsProperties = new HashMap<>();
+    }
+    if (!streamsProperties.containsKey(StreamsConfig.APPLICATION_ID_CONFIG)) {
+      streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID_OPTION_DEFAULT);
+    }
+    if (!streamsProperties.containsKey(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)) {
+      streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT);
+    }
+    KsqlConfig ksqlConfig = new KsqlConfig(streamsProperties);
+    ksqlEngine = new KsqlEngine(ksqlConfig, new KafkaTopicClientImpl(ksqlConfig));
+  }
+
+  /**
+   * Execute the ksql statement in this context.
+   *
+   * @param sql the KSQL statement(s) to execute
+   * @throws Exception if the statements cannot be parsed, planned or started
+   */
+  public void sql(String sql) throws Exception {
+    List<QueryMetadata> queryMetadataList = ksqlEngine.buildMultipleQueries(
+        false, sql, Collections.emptyMap());
+    for (QueryMetadata queryMetadata : queryMetadataList) {
+      if (queryMetadata instanceof PersistentQueryMetadata) {
+        PersistentQueryMetadata persistentQueryMetadata = (PersistentQueryMetadata) queryMetadata;
+        persistentQueryMetadata.getKafkaStreams().start();
+        // TODO: getPersistentQueries() returns a defensive copy, so this put (and the
+        // remove in terminateQuery below) never reaches the engine's own registry.
+        ksqlEngine.getPersistentQueries()
+            .put(persistentQueryMetadata.getId(), persistentQueryMetadata);
+      } else {
+        System.err.println("Ignoring statement: " + sql);
+        System.err.println("Only CREATE statements can run in KSQL embedded mode.");
+        log.warn("Ignoring statement: {}", sql);
+        log.warn("Only CREATE statements can run in KSQL embedded mode.");
+      }
+    }
+  }
+
+  /**
+   * Terminate a query with the given id.
+   *
+   * @param queryId the id of the persistent query to terminate
+   */
+  public void terminateQuery(long queryId) {
+    if (!ksqlEngine.getPersistentQueries().containsKey(queryId)) {
+      throw new KsqlException(String.format("Invalid query id. Query id, %d, does not exist.",
+          queryId));
+    }
+    PersistentQueryMetadata persistentQueryMetadata = ksqlEngine
+        .getPersistentQueries().get(queryId);
+    persistentQueryMetadata.getKafkaStreams().close();
+    ksqlEngine.getPersistentQueries().remove(queryId);
+  }
+
+  public Map<Long, PersistentQueryMetadata> getRunningQueries() {
+    return ksqlEngine.getPersistentQueries();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/KsqlEngine.java b/ksql-core/src/main/java/io/confluent/ksql/KsqlEngine.java
new file mode 100644
index 000000000000..4d74dbfdfc95
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/KsqlEngine.java
@@ -0,0 +1,388 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql;
+
+import io.confluent.ksql.ddl.DdlConfig;
+import io.confluent.ksql.ddl.commands.*;
+import io.confluent.ksql.exception.ParseFailedException;
+import io.confluent.ksql.metastore.*;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.SqlBaseParser;
+import io.confluent.ksql.parser.tree.CreateStream;
+import io.confluent.ksql.parser.tree.CreateStreamAsSelect;
+import io.confluent.ksql.parser.tree.CreateTable;
+import io.confluent.ksql.parser.tree.CreateTableAsSelect;
+import io.confluent.ksql.parser.tree.RegisterTopic;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.QualifiedName;
+import io.confluent.ksql.parser.tree.Query;
+import io.confluent.ksql.parser.tree.QuerySpecification;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.parser.tree.Table;
+import io.confluent.ksql.planner.plan.PlanNode;
+import io.confluent.ksql.util.*;
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.misc.Interval;
+import org.apache.kafka.streams.StreamsConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+public class KsqlEngine implements Closeable {
+
+  private static final Logger log = LoggerFactory.getLogger(KsqlEngine.class);
+
+  // TODO: Decide if any other properties belong in here
+  private static final Set<String> IMMUTABLE_PROPERTIES = new HashSet<>(Arrays.asList(
+      StreamsConfig.BOOTSTRAP_SERVERS_CONFIG
+  ));
+
+  private KsqlConfig ksqlConfig;
+
+  private final MetaStore metaStore;
+  private final KafkaTopicClient kafkaTopicClient;
+  private final DDLCommandExec ddlCommandExec;
+  private final QueryEngine queryEngine;
+
+  private final Map<Long, PersistentQueryMetadata> persistentQueries;
+  private final Set<QueryMetadata> liveQueries;
+
+  public KsqlEngine(final KsqlConfig ksqlConfig, final KafkaTopicClient kafkaTopicClient) {
+    Objects.requireNonNull(ksqlConfig, "ksqlConfig cannot be null");
+
+    this.ksqlConfig = ksqlConfig;
+
+    this.metaStore = new MetaStoreImpl();
+    this.kafkaTopicClient = kafkaTopicClient;
+    this.ddlCommandExec = new DDLCommandExec(metaStore);
+    this.queryEngine = new QueryEngine(this);
+
+    this.persistentQueries = new HashMap<>();
+    this.liveQueries = new HashSet<>();
+  }
+
+  /**
+   * Runs the set of queries in the given query string.
+   *
+   * @param createNewAppId If a new application id should be generated.
+   * @param queriesString The ksql query string.
+   * @param overriddenProperties Per-request properties that override the engine configuration.
+   * @return List of query metadata.
+   * @throws Exception if any of the statements cannot be parsed or planned.
+   */
+  public List<QueryMetadata> buildMultipleQueries(
+      final boolean createNewAppId,
+      final String queriesString,
+      final Map<String, Object> overriddenProperties
+  ) throws Exception {
+    for (String property : overriddenProperties.keySet()) {
+      if (IMMUTABLE_PROPERTIES.contains(property)) {
+        throw new IllegalArgumentException(
+            String.format("Cannot override property '%s'", property)
+        );
+      }
+    }
+
+    // Multiple queries submitted at the same time should succeed or fail as a whole;
+    // thus we use tempMetaStore to store newly created tables, streams or topics.
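+    // As an illustration (hypothetical statements), a batch such as:
+    //   CREATE STREAM s1 (f0 VARCHAR) WITH (kafka_topic = 't1', value_format = 'json');
+    //   CREATE STREAM s2 AS SELECT f0 FROM s1;
+    // must register both s1 and s2 or neither; s2 can only be parsed because s1 was first
+    // added to the temporary metastore, not to the engine's real one.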
+    // MetaStore tempMetaStore = new MetaStoreImpl(metaStore);
+
+    MetaStore tempMetaStore = metaStore.clone();
+    // Build query AST from the query string
+    List<Pair<String, Statement>> queries = parseQueries(queriesString, overriddenProperties, tempMetaStore);
+
+    return planQueries(createNewAppId, queries, overriddenProperties, tempMetaStore);
+
+  }
+
+  public List<QueryMetadata> planQueries(final boolean createNewAppId,
+                                         final List<Pair<String, Statement>> statementList,
+                                         final Map<String, Object> overriddenProperties,
+                                         final MetaStore tempMetaStore)
+      throws Exception {
+
+    // Logical plan creation from the ASTs
+    List<Pair<String, PlanNode>> logicalPlans = queryEngine.buildLogicalPlans(tempMetaStore, statementList);
+
+    // Physical plan creation from logical plans.
+    List<QueryMetadata> runningQueries = queryEngine.buildPhysicalPlans(
+        createNewAppId,
+        logicalPlans,
+        statementList,
+        overriddenProperties,
+        true
+    );
+
+    for (QueryMetadata queryMetadata : runningQueries) {
+      if (queryMetadata instanceof PersistentQueryMetadata) {
+        liveQueries.add(queryMetadata);
+        PersistentQueryMetadata persistentQueryMetadata = (PersistentQueryMetadata) queryMetadata;
+        persistentQueries.put(persistentQueryMetadata.getId(), persistentQueryMetadata);
+      }
+    }
+
+    return runningQueries;
+  }
+
+  public QueryMetadata getQueryExecutionPlan(final Query query) throws Exception {
+
+    // Logical plan creation from the ASTs
+    List<Pair<String, PlanNode>> logicalPlans = queryEngine.buildLogicalPlans(
+        metaStore, Arrays.asList(new Pair<>("", query)));
+
+    // Physical plan creation from logical plans.
+    List<QueryMetadata> runningQueries = queryEngine.buildPhysicalPlans(
+        false,
+        logicalPlans,
+        Arrays.asList(new Pair<>("", query)),
+        Collections.emptyMap(),
+        false
+    );
+    return runningQueries.get(0);
+  }
+
+
+  public List<Pair<String, Statement>> parseQueries(final String queriesString,
+                                                    final Map<String, Object> overriddenProperties,
+                                                    final MetaStore tempMetaStore) {
+    try {
+      MetaStore tempMetaStoreForParser = tempMetaStore.clone();
+      // Parse and AST creation
+      KsqlParser ksqlParser = new KsqlParser();
+      List<SqlBaseParser.SingleStatementContext> parsedStatements =
+          ksqlParser.getStatements(queriesString);
+      List<Pair<String, Statement>> queryList = new ArrayList<>();
+      for (SqlBaseParser.SingleStatementContext singleStatementContext : parsedStatements) {
+        Pair<Statement, DataSourceExtractor> statementInfo =
+            ksqlParser.prepareStatement(singleStatementContext, tempMetaStoreForParser);
+        Statement statement = statementInfo.getLeft();
+        Pair<String, Statement> queryPair =
+            buildSingleQueryAst(
+                statement,
+                getStatementString(singleStatementContext),
+                tempMetaStore,
+                tempMetaStoreForParser,
+                overriddenProperties);
+        if (queryPair != null) {
+          queryList.add(queryPair);
+        }
+      }
+      return queryList;
+    } catch (Exception e) {
+      throw new ParseFailedException("Parsing failed on KsqlEngine msg: " + e.getMessage(), e);
+    }
+  }
+
+  private Pair<String, Statement> buildSingleQueryAst(final Statement statement,
+                                                      final String statementString,
+                                                      final MetaStore tempMetaStore,
+                                                      final MetaStore tempMetaStoreForParser,
+                                                      final Map<String, Object> overriddenProperties
+  ) {
+
+    log.info("Building AST for {}.", statementString);
+
+    if (statement instanceof Query) {
+      return new Pair<>(statementString, (Query) statement);
+    } else if (statement instanceof CreateStreamAsSelect) {
+      CreateStreamAsSelect createStreamAsSelect = (CreateStreamAsSelect) statement;
+      QuerySpecification querySpecification =
+          (QuerySpecification) createStreamAsSelect.getQuery().getQueryBody();
+      Query query = addInto(
+          createStreamAsSelect.getQuery(),
+          querySpecification,
+          createStreamAsSelect.getName().getSuffix(),
+          createStreamAsSelect.getProperties(),
+          createStreamAsSelect.getPartitionByColumn()
+      );
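+      // Register the CSAS sink in the parser-side temp metastore so that later statements
+      // in the same batch can refer to it while the batch is still being parsed.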
+      tempMetaStoreForParser.putSource(queryEngine.getResultDatasource(
+          querySpecification.getSelect(),
+          createStreamAsSelect.getName().getSuffix()
+      ).cloneWithTimeKeyColumns());
+      return new Pair<>(statementString, query);
+    } else if (statement instanceof CreateTableAsSelect) {
+      CreateTableAsSelect createTableAsSelect = (CreateTableAsSelect) statement;
+      QuerySpecification querySpecification =
+          (QuerySpecification) createTableAsSelect.getQuery().getQueryBody();
+
+      Query query = addInto(
+          createTableAsSelect.getQuery(),
+          querySpecification,
+          createTableAsSelect.getName().getSuffix(),
+          createTableAsSelect.getProperties(),
+          Optional.empty()
+      );
+
+      tempMetaStoreForParser.putSource(queryEngine.getResultDatasource(
+          querySpecification.getSelect(),
+          createTableAsSelect.getName().getSuffix()
+      ).cloneWithTimeKeyColumns());
+      return new Pair<>(statementString, query);
+    } else if (statement instanceof RegisterTopic) {
+      ddlCommandExec.tryExecute(
+          new RegisterTopicCommand(
+              (RegisterTopic) statement,
+              overriddenProperties),
+          tempMetaStoreForParser);
+      ddlCommandExec.tryExecute(
+          new RegisterTopicCommand(
+              (RegisterTopic) statement,
+              overriddenProperties),
+          tempMetaStore);
+      return new Pair<>(statementString, statement);
+    } else if (statement instanceof CreateStream) {
+      ddlCommandExec.tryExecute(
+          new CreateStreamCommand(
+              (CreateStream) statement, overriddenProperties, kafkaTopicClient),
+          tempMetaStoreForParser);
+      ddlCommandExec.tryExecute(
+          new CreateStreamCommand(
+              (CreateStream) statement, overriddenProperties, kafkaTopicClient),
+          tempMetaStore);
+      return new Pair<>(statementString, statement);
+    } else if (statement instanceof CreateTable) {
+      ddlCommandExec.tryExecute(
+          new CreateTableCommand(
+              (CreateTable) statement, overriddenProperties, kafkaTopicClient),
+          tempMetaStoreForParser);
+      ddlCommandExec.tryExecute(
+          new CreateTableCommand(
+              (CreateTable) statement, overriddenProperties, kafkaTopicClient),
+          tempMetaStore);
+      return new Pair<>(statementString, statement);
+    }
+    return null;
+  }
+
+  public static String getStatementString(
+      final SqlBaseParser.SingleStatementContext singleStatementContext) {
+    CharStream charStream = singleStatementContext.start.getInputStream();
+    return charStream.getText(new Interval(
+        singleStatementContext.start.getStartIndex(),
+        singleStatementContext.stop.getStopIndex()
+    ));
+  }
+
+  public List<Statement> getStatements(final String sqlString) {
+    return new KsqlParser().buildAst(sqlString, metaStore);
+  }
+
+
+  public Query addInto(final Query query, final QuerySpecification querySpecification,
+                       final String intoName,
+                       final Map<String, Expression> intoProperties,
+                       final Optional<Expression> partitionByExpression) {
+    Table intoTable = new Table(QualifiedName.of(intoName));
+    if (partitionByExpression.isPresent()) {
+      Map<String, Expression> newIntoProperties = new HashMap<>();
+      newIntoProperties.putAll(intoProperties);
+      newIntoProperties.put(DdlConfig.PARTITION_BY_PROPERTY, partitionByExpression.get());
+      intoTable.setProperties(newIntoProperties);
+    } else {
+      intoTable.setProperties(intoProperties);
+    }
+
+    QuerySpecification newQuerySpecification = new QuerySpecification(
+        querySpecification.getSelect(),
+        Optional.of(intoTable),
+        querySpecification.getFrom(),
+        querySpecification.getWindowExpression(),
+        querySpecification.getWhere(),
+        querySpecification.getGroupBy(),
+        querySpecification.getHaving(),
+        querySpecification.getOrderBy(),
+        querySpecification.getLimit()
+    );
+    return new Query(query.getWith(), newQuerySpecification, query.getOrderBy(), query.getLimit());
+ } + + public MetaStore getMetaStore() { + return metaStore; + } + + public KafkaTopicClient getKafkaTopicClient() { + return kafkaTopicClient; + } + + public DDLCommandExec getDDLCommandExec() { + return ddlCommandExec; + } + + public boolean terminateQuery(final long queryId, final boolean closeStreams) { + QueryMetadata queryMetadata = persistentQueries.remove(queryId); + if (queryMetadata == null) { + return false; + } + liveQueries.remove(queryMetadata); + if (closeStreams) { + queryMetadata.getKafkaStreams().close(100L, TimeUnit.MILLISECONDS); + queryMetadata.getKafkaStreams().cleanUp(); + } + return true; + } + + public Map<Long, PersistentQueryMetadata> getPersistentQueries() { + return new HashMap<>(persistentQueries); + } + + public Set<QueryMetadata> getLiveQueries() { + return new HashSet<>(liveQueries); + } + + public static List<String> getImmutableProperties() { + return new ArrayList<>(IMMUTABLE_PROPERTIES); + } + + public Map<String, Object> getKsqlConfigProperties() { + return ksqlConfig.getKsqlConfigProps(); + } + + public KsqlConfig getKsqlConfig() { + return ksqlConfig; + } + + @Override + public void close() throws IOException { + for (QueryMetadata queryMetadata : liveQueries) { + queryMetadata.getKafkaStreams().close(100L, TimeUnit.MILLISECONDS); + queryMetadata.getKafkaStreams().cleanUp(); + } + kafkaTopicClient.close(); + } + + public QueryEngine getQueryEngine() { + return queryEngine; + } + + public boolean terminateAllQueries() { + try { + for (QueryMetadata queryMetadata : liveQueries) { + if (queryMetadata instanceof PersistentQueryMetadata) { + PersistentQueryMetadata persistentQueryMetadata = (PersistentQueryMetadata) queryMetadata; + persistentQueryMetadata.getKafkaStreams().close(100L, TimeUnit.MILLISECONDS); + persistentQueryMetadata.getKafkaStreams().cleanUp(); + } + } + } catch (Exception e) { + return false; + } + + return true; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/QueryEngine.java b/ksql-core/src/main/java/io/confluent/ksql/QueryEngine.java new file mode 100644 index 000000000000..fabd19ab8c73 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/QueryEngine.java @@ -0,0 +1,413 @@ +/** + * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql; + +import io.confluent.ksql.analyzer.AggregateAnalysis; +import io.confluent.ksql.analyzer.AggregateAnalyzer; +import io.confluent.ksql.analyzer.Analysis; +import io.confluent.ksql.analyzer.AnalysisContext; +import io.confluent.ksql.analyzer.Analyzer; +import io.confluent.ksql.ddl.commands.CreateStreamCommand; +import io.confluent.ksql.ddl.commands.CreateTableCommand; +import io.confluent.ksql.ddl.commands.DDLCommand; +import io.confluent.ksql.ddl.commands.DDLCommandResult; +import io.confluent.ksql.ddl.commands.DropSourceCommand; +import io.confluent.ksql.ddl.commands.DropTopicCommand; +import io.confluent.ksql.ddl.commands.RegisterTopicCommand; +import io.confluent.ksql.metastore.DataSource; +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.KsqlTable; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.rewrite.AggregateExpressionRewriter; +import io.confluent.ksql.parser.tree.CreateStream; +import io.confluent.ksql.parser.tree.CreateTable; +import io.confluent.ksql.parser.tree.DropStream; +import io.confluent.ksql.parser.tree.DropTable; +import io.confluent.ksql.parser.tree.DropTopic; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.ExpressionTreeRewriter; +import io.confluent.ksql.parser.tree.Query; +import io.confluent.ksql.parser.tree.QuerySpecification; +import io.confluent.ksql.parser.tree.RegisterTopic; +import io.confluent.ksql.parser.tree.Select; +import io.confluent.ksql.parser.tree.SelectItem; +import io.confluent.ksql.parser.tree.SingleColumn; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.physical.PhysicalPlanBuilder; +import io.confluent.ksql.planner.LogicalPlanner; +import io.confluent.ksql.planner.plan.KsqlBareOutputNode; +import io.confluent.ksql.planner.plan.KsqlStructuredDataOutputNode; +import io.confluent.ksql.planner.plan.OutputNode; +import io.confluent.ksql.planner.plan.PlanNode; +import io.confluent.ksql.structured.QueuedSchemaKStream; +import io.confluent.ksql.structured.SchemaKStream; +import io.confluent.ksql.structured.SchemaKTable; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.Pair; +import io.confluent.ksql.util.PersistentQueryMetadata; +import io.confluent.ksql.util.QueryMetadata; +import io.confluent.ksql.util.QueuedQueryMetadata; +import io.confluent.ksql.util.timestamp.KsqlTimestampExtractor; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.kstream.KStreamBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; + +public class QueryEngine { + + private static final Logger log = LoggerFactory.getLogger(QueryEngine.class); + private final AtomicLong queryIdCounter; + private final KsqlEngine ksqlEngine; + + + public QueryEngine(final KsqlEngine ksqlEngine) { + this.queryIdCounter = new AtomicLong(1); + this.ksqlEngine = ksqlEngine; + } + + + public List> buildLogicalPlans( + final MetaStore metaStore, + 
final List<Pair<String, Statement>> statementList) { + + List<Pair<String, PlanNode>> logicalPlansList = new ArrayList<>(); + // tempMetaStore makes sources created by earlier statements in this batch visible to + // later ones without mutating the engine's real metastore. + MetaStore tempMetaStore = metaStore.clone(); + + for (Pair<String, Statement> statementQueryPair : statementList) { + if (statementQueryPair.getRight() instanceof Query) { + PlanNode logicalPlan = buildQueryLogicalPlan((Query) statementQueryPair.getRight(), + tempMetaStore); + logicalPlansList.add(new Pair<>(statementQueryPair.getLeft(), logicalPlan)); + } else { + logicalPlansList.add(new Pair<>(statementQueryPair.getLeft(), null)); + } + + log.info("Built logical plan for {}.", statementQueryPair.getLeft()); + } + return logicalPlansList; + } + + public PlanNode buildQueryLogicalPlan(final Query query, final MetaStore tempMetaStore) { + + // Analyze the query to resolve the references and extract operations + Analysis analysis = new Analysis(); + Analyzer analyzer = new Analyzer(analysis, tempMetaStore); + analyzer.process(query, new AnalysisContext(null, null)); + + AggregateAnalysis aggregateAnalysis = new AggregateAnalysis(); + AggregateAnalyzer aggregateAnalyzer = new + AggregateAnalyzer(aggregateAnalysis, tempMetaStore, analysis); + AggregateExpressionRewriter aggregateExpressionRewriter = new AggregateExpressionRewriter(); + for (Expression expression : analysis.getSelectExpressions()) { + aggregateAnalyzer + .process(expression, new AnalysisContext(null, null)); + if (!aggregateAnalyzer.isHasAggregateFunction()) { + aggregateAnalysis.getNonAggResultColumns().add(expression); + } + aggregateAnalysis.getFinalSelectExpressions() + .add(ExpressionTreeRewriter.rewriteWith(aggregateExpressionRewriter, expression)); + aggregateAnalyzer.setHasAggregateFunction(false); + } + + if (!aggregateAnalysis.getAggregateFunctionArguments().isEmpty() && + analysis.getGroupByExpressions().isEmpty()) { + throw new KsqlException("Aggregate query needs GROUP BY clause."); + } + // TODO: make sure only aggregates are in the expression. For now we assume this is the case. + if (analysis.getHavingExpression() != null) { + aggregateAnalyzer.process(analysis.getHavingExpression(), + new AnalysisContext(null, null)); + if (!aggregateAnalyzer.isHasAggregateFunction()) { + aggregateAnalysis.getNonAggResultColumns().add(analysis.getHavingExpression()); + } + aggregateAnalysis + .setHavingExpression(ExpressionTreeRewriter.rewriteWith(aggregateExpressionRewriter, + analysis.getHavingExpression())); + aggregateAnalyzer.setHasAggregateFunction(false); + } + + enforceAggregateRules(query, aggregateAnalysis); + + + // Build a logical plan + PlanNode logicalPlan = new LogicalPlanner(analysis, aggregateAnalysis).buildPlan(); + if (logicalPlan instanceof KsqlStructuredDataOutputNode) { + KsqlStructuredDataOutputNode ksqlStructuredDataOutputNode = + (KsqlStructuredDataOutputNode) logicalPlan; + + StructuredDataSource + structuredDataSource = + new KsqlStream(ksqlStructuredDataOutputNode.getId().toString(), + ksqlStructuredDataOutputNode.getSchema(), + ksqlStructuredDataOutputNode.getKeyField(), + ksqlStructuredDataOutputNode.getTimestampField() == null + ?
ksqlStructuredDataOutputNode.getTheSourceNode().getTimestampField() + : ksqlStructuredDataOutputNode.getTimestampField(), + ksqlStructuredDataOutputNode.getKsqlTopic()); + + tempMetaStore.putTopic(ksqlStructuredDataOutputNode.getKsqlTopic()); + tempMetaStore.putSource(structuredDataSource.cloneWithTimeKeyColumns()); + } + return logicalPlan; + } + + public List<QueryMetadata> buildPhysicalPlans( + final boolean addUniqueTimeSuffix, + final List<Pair<String, PlanNode>> logicalPlans, + final List<Pair<String, Statement>> statementList, + final Map<String, Object> overriddenStreamsProperties, + final boolean updateMetastore + ) throws Exception { + + List<QueryMetadata> physicalPlans = new ArrayList<>(); + + for (int i = 0; i < logicalPlans.size(); i++) { + + Pair<String, PlanNode> statementPlanPair = logicalPlans.get(i); + if (statementPlanPair.getRight() == null) { + handleDdlStatement(statementList.get(i).getRight(), overriddenStreamsProperties); + } else { + buildQueryPhysicalPlan(physicalPlans, addUniqueTimeSuffix, statementPlanPair, + overriddenStreamsProperties, updateMetastore); + } + + } + return physicalPlans; + } + + public void buildQueryPhysicalPlan(final List<QueryMetadata> physicalPlans, + final boolean addUniqueTimeSuffix, + final Pair<String, PlanNode> statementPlanPair, + final Map<String, Object> overriddenStreamsProperties, + final boolean updateMetastore) throws Exception { + + PlanNode logicalPlan = statementPlanPair.getRight(); + KStreamBuilder builder = new KStreamBuilder(); + + KsqlConfig ksqlConfigClone = ksqlEngine.getKsqlConfig().clone(); + + // Build a physical plan, in this case a Kafka Streams DSL + PhysicalPlanBuilder physicalPlanBuilder = + new PhysicalPlanBuilder(builder, ksqlConfigClone, ksqlEngine.getKafkaTopicClient()); + SchemaKStream schemaKStream = physicalPlanBuilder.buildPhysicalPlan(logicalPlan); + + OutputNode outputNode = physicalPlanBuilder.getPlanSink(); + boolean isBareQuery = outputNode instanceof KsqlBareOutputNode; + + // Check to make sure the logical and physical plans match up; + // important to do this BEFORE actually starting up + // the corresponding Kafka Streams job + if (isBareQuery && !(schemaKStream instanceof QueuedSchemaKStream)) { + throw new Exception(String.format( + "Mismatch between logical and physical output; " + + "expected a QueuedSchemaKStream based on logical " + + "KsqlBareOutputNode, found a %s instead", + schemaKStream.getClass().getCanonicalName() + )); + } + String serviceId = ksqlEngine.getKsqlConfig() + .get(KsqlConfig.KSQL_SERVICE_ID_CONFIG).toString(); + String persistentQueryPrefix = ksqlEngine.getKsqlConfig() + .get(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG).toString(); + String transientQueryPrefix = ksqlEngine.getKsqlConfig() + .get(KsqlConfig.KSQL_TRANSIENT_QUERY_NAME_PREFIX_CONFIG).toString(); + // A bare (transient) query streams its results back to the client through a queue; + // a persistent query writes them to a sink topic. + if (isBareQuery) { + String applicationId = getBareQueryApplicationId(serviceId, transientQueryPrefix); + if (addUniqueTimeSuffix) { + applicationId = addTimeSuffix(applicationId); + } + + KafkaStreams streams = + buildStreams(builder, applicationId, ksqlConfigClone, overriddenStreamsProperties); + + QueuedSchemaKStream queuedSchemaKStream = (QueuedSchemaKStream) schemaKStream; + KsqlBareOutputNode ksqlBareOutputNode = (KsqlBareOutputNode) outputNode; + + SchemaKStream sourceSchemaKstream = schemaKStream.getSourceSchemaKStreams().get(0); + + physicalPlans.add(new QueuedQueryMetadata( + statementPlanPair.getLeft(), + streams, + ksqlBareOutputNode, + schemaKStream.getExecutionPlan(""), + queuedSchemaKStream.getQueue(), + (sourceSchemaKstream instanceof SchemaKTable) + ? DataSource.DataSourceType.KTABLE : DataSource.DataSourceType.KSTREAM + )); + + } else if (outputNode instanceof KsqlStructuredDataOutputNode) { + long queryId = getNextQueryId(); + + String applicationId = serviceId + persistentQueryPrefix + queryId; + if (addUniqueTimeSuffix) { + applicationId = addTimeSuffix(applicationId); + } + + KafkaStreams streams = + buildStreams(builder, applicationId, ksqlConfigClone, overriddenStreamsProperties); + + KsqlStructuredDataOutputNode kafkaTopicOutputNode = + (KsqlStructuredDataOutputNode) outputNode; + physicalPlans.add( + new PersistentQueryMetadata(statementPlanPair.getLeft(), + streams, kafkaTopicOutputNode, schemaKStream + .getExecutionPlan(""), queryId, + (schemaKStream instanceof SchemaKTable) + ? DataSource.DataSourceType.KTABLE : DataSource.DataSourceType.KSTREAM) + ); + + MetaStore metaStore = ksqlEngine.getMetaStore(); + if (metaStore.getTopic(kafkaTopicOutputNode.getKafkaTopicName()) == null) { + metaStore.putTopic(kafkaTopicOutputNode.getKsqlTopic()); + } + StructuredDataSource sinkDataSource; + if (schemaKStream instanceof SchemaKTable) { + SchemaKTable schemaKTable = (SchemaKTable) schemaKStream; + sinkDataSource = + new KsqlTable(kafkaTopicOutputNode.getId().toString(), + kafkaTopicOutputNode.getSchema(), + schemaKStream.getKeyField(), + kafkaTopicOutputNode.getTimestampField(), + kafkaTopicOutputNode.getKsqlTopic(), + kafkaTopicOutputNode.getId().toString() + + ksqlEngine.getKsqlConfig().get(KsqlConfig.KSQL_TABLE_STATESTORE_NAME_SUFFIX_CONFIG), + schemaKTable.isWindowed()); + } else { + sinkDataSource = + new KsqlStream(kafkaTopicOutputNode.getId().toString(), + kafkaTopicOutputNode.getSchema(), + schemaKStream.getKeyField(), + kafkaTopicOutputNode.getTimestampField(), + kafkaTopicOutputNode.getKsqlTopic()); + } + + if (updateMetastore) { + metaStore.putSource(sinkDataSource.cloneWithTimeKeyColumns()); + } + } else { + throw new KsqlException("Sink data source is not correct; unexpected output node type: " + + outputNode.getClass().getCanonicalName()); + } + log.info("Built physical plan for {}.", statementPlanPair.getLeft()); + log.info(" Execution plan: \n"); + log.info(schemaKStream.getExecutionPlan("")); + } + + public DDLCommandResult handleDdlStatement( + final Statement statement, + final Map<String, Object> overriddenProperties) { + DDLCommand command = generateDDLCommand(statement, overriddenProperties); + return ksqlEngine.getDDLCommandExec().execute(command); + } + + private DDLCommand generateDDLCommand( + final Statement statement, + final Map<String, Object> overriddenProperties) { + if (statement instanceof RegisterTopic) { + return new RegisterTopicCommand((RegisterTopic) statement, overriddenProperties); + } else if (statement instanceof CreateStream) { + return new CreateStreamCommand((CreateStream) statement, overriddenProperties, + ksqlEngine.getKafkaTopicClient()); + } else if (statement instanceof CreateTable) { + return new CreateTableCommand((CreateTable) statement, overriddenProperties, + ksqlEngine.getKafkaTopicClient()); + } else if (statement instanceof DropStream) { + return new DropSourceCommand((DropStream) statement); + } else if (statement instanceof DropTable) { + return new DropSourceCommand((DropTable) statement); + } else if (statement instanceof DropTopic) { + return new DropTopicCommand((DropTopic) statement); + } else { + throw new KsqlException( + "Corresponding command not found for statement: " + statement.toString()); + } + } + + public StructuredDataSource getResultDatasource(final Select select, final String name) { + + SchemaBuilder dataSource = SchemaBuilder.struct().name(name); + for
(SelectItem selectItem : select.getSelectItems()) { + if (selectItem instanceof SingleColumn) { + SingleColumn singleColumn = (SingleColumn) selectItem; + String fieldName = singleColumn.getAlias().get(); + dataSource = dataSource.field(fieldName, Schema.BOOLEAN_SCHEMA); + } + } + + KsqlTopic ksqlTopic = new KsqlTopic(name, name, null); + return new KsqlStream(name, dataSource.schema(), null, null, ksqlTopic); + } + + private KafkaStreams buildStreams( + final KStreamBuilder builder, + final String applicationId, + final KsqlConfig ksqlConfig, + final Map overriddenProperties + ) { + Map newStreamsProperties = ksqlConfig.getKsqlConfigProps(); + newStreamsProperties.putAll(overriddenProperties); + newStreamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId); + newStreamsProperties.put( + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, + ksqlConfig.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)); + newStreamsProperties.put( + StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, + ksqlConfig.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)); + newStreamsProperties.put( + StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, + ksqlConfig.get(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG)); + if (ksqlConfig.get(KsqlConfig.KSQL_TIMESTAMP_COLUMN_INDEX) != null) { + newStreamsProperties.put( + KsqlConfig.KSQL_TIMESTAMP_COLUMN_INDEX, + ksqlConfig.get(KsqlConfig.KSQL_TIMESTAMP_COLUMN_INDEX)); + newStreamsProperties.put( + StreamsConfig.TIMESTAMP_EXTRACTOR_CLASS_CONFIG, KsqlTimestampExtractor.class); + } + + + return new KafkaStreams(builder, new StreamsConfig(newStreamsProperties)); + } + + private long getNextQueryId() { + return queryIdCounter.getAndIncrement(); + } + + // TODO: This should probably be changed + private String getBareQueryApplicationId(String serviceId, String transientQueryPrefix) { + return serviceId + transientQueryPrefix + + Math.abs(ThreadLocalRandom.current().nextLong()); + } + + private String addTimeSuffix(String original) { + return String.format("%s_%d", original, System.currentTimeMillis()); + } + + private void enforceAggregateRules(Query query, AggregateAnalysis aggregateAnalysis) { + if (!((QuerySpecification) query.getQueryBody()).getGroupBy().isPresent()) { + return; + } + int numberOfNonAggProjections = aggregateAnalysis.getNonAggResultColumns().size(); + int groupBySize = ((QuerySpecification) query.getQueryBody()).getGroupBy().get() + .getGroupingElements().size(); + if (numberOfNonAggProjections != groupBySize) { + throw new KsqlException("Group by elements should match the SELECT expressions."); + } + } + +} \ No newline at end of file diff --git a/ksql-core/src/main/java/io/confluent/ksql/analyzer/AggregateAnalysis.java b/ksql-core/src/main/java/io/confluent/ksql/analyzer/AggregateAnalysis.java new file mode 100644 index 000000000000..aa86884d8bae --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/analyzer/AggregateAnalysis.java @@ -0,0 +1,61 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.analyzer; + +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.FunctionCall; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class AggregateAnalysis { + + List<Expression> aggregateFunctionArguments = new ArrayList<>(); + List<Expression> requiredColumnsList = new ArrayList<>(); + + Expression havingExpression = null; + + Map<String, Expression> requiredColumnsMap = new HashMap<>(); + + List<FunctionCall> functionList = new ArrayList<>(); + + List<Expression> nonAggResultColumns = new ArrayList<>(); + + List<Expression> finalSelectExpressions = new ArrayList<>(); + + public List<Expression> getAggregateFunctionArguments() { + return aggregateFunctionArguments; + } + + public List<Expression> getRequiredColumnsList() { + return requiredColumnsList; + } + + public Map<String, Expression> getRequiredColumnsMap() { + return requiredColumnsMap; + } + + public List<FunctionCall> getFunctionList() { + return functionList; + } + + public List<Expression> getNonAggResultColumns() { + return nonAggResultColumns; + } + + public List<Expression> getFinalSelectExpressions() { + return finalSelectExpressions; + } + + public Expression getHavingExpression() { + return havingExpression; + } + + public void setHavingExpression(Expression havingExpression) { + this.havingExpression = havingExpression; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/analyzer/AggregateAnalyzer.java b/ksql-core/src/main/java/io/confluent/ksql/analyzer/AggregateAnalyzer.java new file mode 100644 index 000000000000..5b8bf457d4d7 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/analyzer/AggregateAnalyzer.java @@ -0,0 +1,81 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.analyzer; + +import io.confluent.ksql.function.KsqlFunctions; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.tree.DereferenceExpression; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.FunctionCall; +import io.confluent.ksql.parser.tree.Node; +import io.confluent.ksql.parser.tree.QualifiedName; +import io.confluent.ksql.parser.tree.QualifiedNameReference; +import io.confluent.ksql.planner.DefaultTraversalVisitor; +import io.confluent.ksql.util.SchemaUtil; + +public class AggregateAnalyzer extends DefaultTraversalVisitor<Node, AnalysisContext> { + + private AggregateAnalysis aggregateAnalysis; + private MetaStore metaStore; + private Analysis analysis; + + private boolean hasAggregateFunction = false; + + public boolean isHasAggregateFunction() { + return hasAggregateFunction; + } + + public void setHasAggregateFunction(boolean hasAggregateFunction) { + this.hasAggregateFunction = hasAggregateFunction; + } + + public AggregateAnalyzer(AggregateAnalysis aggregateAnalysis, MetaStore metaStore, + Analysis analysis) { + this.aggregateAnalysis = aggregateAnalysis; + this.metaStore = metaStore; + this.analysis = analysis; + } + + @Override + protected Node visitFunctionCall(final FunctionCall node, final AnalysisContext context) { + String functionName = node.getName().getSuffix(); + if (KsqlFunctions.isAnAggregateFunction(functionName)) { + if (node.getArguments().isEmpty()) { + Expression argExpression; + if (analysis.getJoin() != null) { + Expression baseExpression = new QualifiedNameReference( + QualifiedName.of(analysis.getJoin().getLeftAlias())); + argExpression = new DereferenceExpression(baseExpression, SchemaUtil.ROWTIME_NAME); + } else { + Expression baseExpression = new QualifiedNameReference( + QualifiedName.of(analysis.getFromDataSources().get(0).getRight())); + argExpression =
new DereferenceExpression(baseExpression, SchemaUtil.ROWTIME_NAME); + } + aggregateAnalysis.aggregateFunctionArguments.add(argExpression); + node.getArguments().add(argExpression); + } else { + aggregateAnalysis.aggregateFunctionArguments.add(node.getArguments().get(0)); + } + aggregateAnalysis.functionList.add(node); + hasAggregateFunction = true; + } + + for (Expression argExp: node.getArguments()) { + process(argExp, context); + } + return null; + } + + @Override + protected Node visitDereferenceExpression(final DereferenceExpression node, + final AnalysisContext context) { + String name = node.toString(); + if (aggregateAnalysis.getRequiredColumnsMap().get(name) == null) { + aggregateAnalysis.getRequiredColumnsList().add(node); + aggregateAnalysis.getRequiredColumnsMap().put(name, node); + } + return null; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/analyzer/Analysis.java b/ksql-core/src/main/java/io/confluent/ksql/analyzer/Analysis.java new file mode 100644 index 000000000000..c573935d6c42 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/analyzer/Analysis.java @@ -0,0 +1,155 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.analyzer; + +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.WindowExpression; +import io.confluent.ksql.planner.plan.JoinNode; +import io.confluent.ksql.util.Pair; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +public class Analysis { + + private StructuredDataSource into; + private Map intoProperties = new HashMap<>(); + private String intoFormat = null; + // TODO: Maybe have all as properties. At the moment this will only be set if format is avro. 
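+ // Populated by Analyzer.setIntoTopicFormat: defaults to "/tmp/<sink name>.avro" unless an + // AVROSCHEMAFILE property is given in the WITH clause.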
+ private String intoAvroSchemaFilePath = null; + private String intoKafkaTopicName = null; + private List<Pair<StructuredDataSource, String>> fromDataSources = new ArrayList<>(); + private JoinNode join; + private Expression whereExpression = null; + private List<Expression> selectExpressions = new ArrayList<>(); + private List<String> selectExpressionAlias = new ArrayList<>(); + + private List<Expression> groupByExpressions = new ArrayList<>(); + private WindowExpression windowExpression = null; + + private Expression havingExpression = null; + + private Optional<Integer> limitClause = Optional.empty(); + + + public void addSelectItem(final Expression expression, final String alias) { + selectExpressions.add(expression); + selectExpressionAlias.add(alias); + } + + public StructuredDataSource getInto() { + return into; + } + + public void setInto(StructuredDataSource into) { + this.into = into; + } + + + public List<Pair<StructuredDataSource, String>> getFromDataSources() { + return fromDataSources; + } + + public void setFromDataSources(List<Pair<StructuredDataSource, String>> fromDataSources) { + this.fromDataSources = fromDataSources; + } + + public Expression getWhereExpression() { + return whereExpression; + } + + public void setWhereExpression(Expression whereExpression) { + this.whereExpression = whereExpression; + } + + public List<Expression> getSelectExpressions() { + return selectExpressions; + } + + public void setSelectExpressions(List<Expression> selectExpressions) { + this.selectExpressions = selectExpressions; + } + + public List<String> getSelectExpressionAlias() { + return selectExpressionAlias; + } + + public void setSelectExpressionAlias(List<String> selectExpressionAlias) { + this.selectExpressionAlias = selectExpressionAlias; + } + + public JoinNode getJoin() { + return join; + } + + public void setJoin(JoinNode join) { + this.join = join; + } + + public void setIntoFormat(String intoFormat) { + this.intoFormat = intoFormat; + } + + public void setIntoKafkaTopicName(String intoKafkaTopicName) { + this.intoKafkaTopicName = intoKafkaTopicName; + } + + public String getIntoFormat() { + return intoFormat; + } + + public String getIntoKafkaTopicName() { + return intoKafkaTopicName; + } + + public String getIntoAvroSchemaFilePath() { + return intoAvroSchemaFilePath; + } + + public void setIntoAvroSchemaFilePath(String intoAvroSchemaFilePath) { + this.intoAvroSchemaFilePath = intoAvroSchemaFilePath; + } + + public List<Expression> getGroupByExpressions() { + return groupByExpressions; + } + + public void setGroupByExpressions(List<Expression> groupByExpressions) { + this.groupByExpressions = groupByExpressions; + } + + public WindowExpression getWindowExpression() { + return windowExpression; + } + + public void setWindowExpression(WindowExpression windowExpression) { + this.windowExpression = windowExpression; + } + + public Expression getHavingExpression() { + return havingExpression; + } + + public void setHavingExpression(Expression havingExpression) { + this.havingExpression = havingExpression; + } + + public Map<String, Object> getIntoProperties() { + return intoProperties; + } + + public Optional<Integer> getLimitClause() { + return limitClause; + } + + public void setLimitClause(Optional<Integer> limitClause) { + this.limitClause = limitClause; + } +} + diff --git a/ksql-core/src/main/java/io/confluent/ksql/analyzer/AnalysisContext.java b/ksql-core/src/main/java/io/confluent/ksql/analyzer/AnalysisContext.java new file mode 100644 index 000000000000..7ec8aa31567f --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/analyzer/AnalysisContext.java @@ -0,0 +1,45 @@ +/** + * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.analyzer; + +import io.confluent.ksql.parser.tree.Node; + +public class AnalysisContext { + + public enum ParentType { + SELECT("select"), + SELECTITEM("selectitem"), + INTO("into"), + FROM("from"), + WHERE("where"), + GROUPBY("GROUPBY"); + private final String value; + + ParentType(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + } + + final Node parentNode; + final ParentType parentType; + + public AnalysisContext(final Node parentNode, final ParentType parentType) { + this.parentNode = parentNode; + this.parentType = parentType; + } + + public Node getParentNode() { + return parentNode; + } + + public ParentType getParentType() { + return parentType; + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/analyzer/Analyzer.java b/ksql-core/src/main/java/io/confluent/ksql/analyzer/Analyzer.java new file mode 100644 index 000000000000..93ee33c39de7 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/analyzer/Analyzer.java @@ -0,0 +1,533 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.analyzer; + +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.metastore.DataSource; +import io.confluent.ksql.metastore.KsqlStdOut; +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.tree.AliasedRelation; +import io.confluent.ksql.parser.tree.AllColumns; +import io.confluent.ksql.parser.tree.Cast; +import io.confluent.ksql.parser.tree.ComparisonExpression; +import io.confluent.ksql.parser.tree.DereferenceExpression; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.GroupBy; +import io.confluent.ksql.parser.tree.GroupingElement; +import io.confluent.ksql.parser.tree.Join; +import io.confluent.ksql.parser.tree.JoinOn; +import io.confluent.ksql.parser.tree.Node; +import io.confluent.ksql.parser.tree.QualifiedName; +import io.confluent.ksql.parser.tree.QualifiedNameReference; +import io.confluent.ksql.parser.tree.QuerySpecification; +import io.confluent.ksql.parser.tree.Select; +import io.confluent.ksql.parser.tree.SelectItem; +import io.confluent.ksql.parser.tree.SingleColumn; +import io.confluent.ksql.parser.tree.Table; +import io.confluent.ksql.parser.tree.WindowExpression; +import io.confluent.ksql.planner.DefaultTraversalVisitor; +import io.confluent.ksql.planner.plan.JoinNode; +import io.confluent.ksql.planner.plan.PlanNodeId; +import io.confluent.ksql.planner.plan.StructuredDataSourceNode; +import io.confluent.ksql.serde.KsqlTopicSerDe; +import io.confluent.ksql.serde.avro.KsqlAvroTopicSerDe; +import io.confluent.ksql.serde.delimited.KsqlDelimitedTopicSerDe; +import io.confluent.ksql.serde.json.KsqlJsonTopicSerDe; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.Pair; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +import java.util.List; +import java.util.Optional; +import java.util.Set; + +import static java.lang.String.format; + +public class Analyzer extends DefaultTraversalVisitor { + + private Analysis analysis; + private MetaStore metaStore; + + public Analyzer(Analysis analysis, MetaStore metaStore) { + this.analysis = analysis; + this.metaStore = metaStore; + } + + @Override + protected Node visitQuerySpecification(final 
QuerySpecification node, + final AnalysisContext context) { + + process(node.getFrom().get(), + new AnalysisContext(null, AnalysisContext.ParentType.FROM)); + + process(node.getInto().get(), new AnalysisContext(null, + AnalysisContext.ParentType.INTO)); + if (!(analysis.getInto() instanceof KsqlStdOut)) { + analyzeNonStdOutSink(); + } + + process(node.getSelect(), new AnalysisContext(null, + AnalysisContext.ParentType.SELECT)); + if (node.getWhere().isPresent()) { + analyzeWhere(node.getWhere().get(), context); + } + if (node.getGroupBy().isPresent()) { + analyzeGroupBy(node.getGroupBy().get(), context); + } + + if (node.getWindowExpression().isPresent()) { + analyzeWindowExpression(node.getWindowExpression().get(), context); + } + + if (node.getHaving().isPresent()) { + analyzeHaving(node.getHaving().get(), context); + } + + if (node.getLimit().isPresent()) { + String limitStr = node.getLimit().get(); + Integer limitInt = Integer.parseInt(limitStr); + analysis.setLimitClause(Optional.of(limitInt)); + } + analyzeExpressions(); + + return null; + } + + private void analyzeNonStdOutSink() { + List> fromDataSources = analysis.getFromDataSources(); + + StructuredDataSource intoStructuredDataSource = (StructuredDataSource) analysis.getInto(); + String intoKafkaTopicName = analysis.getIntoKafkaTopicName(); + if (intoKafkaTopicName == null) { + intoKafkaTopicName = intoStructuredDataSource.getName(); + } + + KsqlTopicSerDe intoTopicSerde = fromDataSources.get(0).getLeft().getKsqlTopic() + .getKsqlTopicSerDe(); + if (analysis.getIntoFormat() != null) { + switch (analysis.getIntoFormat().toUpperCase()) { + case DataSource.AVRO_SERDE_NAME: + intoTopicSerde = new KsqlAvroTopicSerDe(null); + break; + case DataSource.JSON_SERDE_NAME: + intoTopicSerde = new KsqlJsonTopicSerDe(null); + break; + case DataSource.DELIMITED_SERDE_NAME: + intoTopicSerde = new KsqlDelimitedTopicSerDe(); + break; + default: + throw new KsqlException( + String.format("Unsupported format: %s", analysis.getIntoFormat())); + } + } else { + if (intoTopicSerde instanceof KsqlAvroTopicSerDe) { + intoTopicSerde = new KsqlAvroTopicSerDe(null); + } + } + + KsqlTopic newIntoKsqlTopic = new KsqlTopic(intoKafkaTopicName, + intoKafkaTopicName, intoTopicSerde); + KsqlStream intoKsqlStream = new KsqlStream(intoStructuredDataSource.getName(), + null, null, null, + newIntoKsqlTopic); + analysis.setInto(intoKsqlStream); + } + + private void analyzeExpressions() { + Schema schema = analysis.getFromDataSources().get(0).getLeft().getSchema(); + boolean isJoinSchema = false; + if (analysis.getJoin() != null) { + schema = analysis.getJoin().getSchema(); + isJoinSchema = true; + } + ExpressionAnalyzer expressionAnalyzer = new ExpressionAnalyzer(schema, isJoinSchema); + + for (Expression selectExpression: analysis.getSelectExpressions()) { + expressionAnalyzer.analyzeExpression(selectExpression); + } + if (analysis.getWhereExpression() != null) { + expressionAnalyzer.analyzeExpression(analysis.getWhereExpression()); + } + if (!analysis.getGroupByExpressions().isEmpty()) { + for (Expression expression: analysis.getGroupByExpressions()) { + expressionAnalyzer.analyzeExpression(expression); + } + } + if (analysis.getHavingExpression() != null) { + expressionAnalyzer.analyzeExpression(analysis.getHavingExpression()); + } + } + + @Override + protected Node visitJoin(final Join node, final AnalysisContext context) { + AliasedRelation left = (AliasedRelation) process(node.getLeft(), context); + AliasedRelation right = (AliasedRelation) process(node.getRight(), 
context); + + String leftSideName = ((Table) left.getRelation()).getName().getSuffix(); + StructuredDataSource leftDataSource = metaStore.getSource(leftSideName); + if (leftDataSource == null) { + throw new KsqlException(format("Resource %s does not exist.", leftSideName)); + } + leftDataSource = timestampColumn(left, leftDataSource); + + String rightSideName = ((Table) right.getRelation()).getName().getSuffix(); + StructuredDataSource rightDataSource = metaStore.getSource(rightSideName); + if (rightDataSource == null) { + throw new KsqlException(format("Resource %s does not exist.", rightSideName)); + } + + rightDataSource = timestampColumn(right, rightDataSource); + + String leftAlias = left.getAlias(); + String rightAlias = right.getAlias(); + StructuredDataSourceNode + leftSourceKafkaTopicNode = + new StructuredDataSourceNode(new PlanNodeId("KafkaTopic_Left"), leftDataSource.getSchema(), + leftDataSource.getKeyField(), + leftDataSource.getTimestampField(), + leftDataSource.getKsqlTopic().getTopicName(), + leftAlias, leftDataSource.getDataSourceType(), + leftDataSource); + StructuredDataSourceNode + rightSourceKafkaTopicNode = + new StructuredDataSourceNode(new PlanNodeId("KafkaTopic_Right"), + rightDataSource.getSchema(), + rightDataSource.getKeyField(), + rightDataSource.getTimestampField(), + rightDataSource.getKsqlTopic().getTopicName(), + rightAlias, rightDataSource.getDataSourceType(), + rightDataSource); + + JoinNode.Type joinType; + switch (node.getType()) { + case INNER: + joinType = JoinNode.Type.INNER; + break; + case LEFT: + joinType = JoinNode.Type.LEFT; + break; + case RIGHT: + joinType = JoinNode.Type.RIGHT; + break; + case CROSS: + joinType = JoinNode.Type.CROSS; + break; + case FULL: + joinType = JoinNode.Type.FULL; + break; + default: + throw new KsqlException("Join type is not supported: " + node.getType().name()); + } + + JoinOn joinOn = (JoinOn) (node.getCriteria().get()); + ComparisonExpression comparisonExpression = (ComparisonExpression) joinOn.getExpression(); + + String leftKeyFieldName = fetchKeyFieldName(comparisonExpression.getLeft()); + String rightKeyFieldName = fetchKeyFieldName(comparisonExpression.getRight()); + + if (comparisonExpression.getType() != ComparisonExpression.Type.EQUAL) { + throw new KsqlException("Join criteria is not supported."); + } + + JoinNode joinNode = + new JoinNode(new PlanNodeId("Join"), joinType, leftSourceKafkaTopicNode, + rightSourceKafkaTopicNode, leftKeyFieldName, rightKeyFieldName, leftAlias, + rightAlias); + analysis.setJoin(joinNode); + return null; + } + + private String fetchKeyFieldName(Expression expression) { + if (expression instanceof DereferenceExpression) { + DereferenceExpression + leftDereferenceExpression = + (DereferenceExpression) expression; + return leftDereferenceExpression.getFieldName(); + } else if (expression instanceof QualifiedNameReference) { + QualifiedNameReference + leftQualifiedNameReference = + (QualifiedNameReference) expression; + return leftQualifiedNameReference.getName().getSuffix(); + } else { + throw new KsqlException("Join criteria is not supported."); + } + } + + private StructuredDataSource timestampColumn(AliasedRelation aliasedRelation, + StructuredDataSource + structuredDataSource) { + if (((Table) aliasedRelation.getRelation()).getProperties() != null) { + if (((Table) aliasedRelation.getRelation()).getProperties() + .get(DdlConfig.TIMESTAMP_NAME_PROPERTY) != null) { + String timestampFieldName = (((Table) aliasedRelation.getRelation())) + 
.getProperties().get(DdlConfig.TIMESTAMP_NAME_PROPERTY).toString().toUpperCase(); + if (!(timestampFieldName.startsWith("'") && timestampFieldName.endsWith("'"))) { + throw new KsqlException("Property value should be a string enclosed in single quotes."); + } + timestampFieldName = timestampFieldName.substring(1, timestampFieldName.length() - 1); + structuredDataSource = structuredDataSource.cloneWithTimeField(timestampFieldName); + } + } + return structuredDataSource; + } + + @Override + protected Node visitAliasedRelation(AliasedRelation node, AnalysisContext context) { + String structuredDataSourceName = ((Table) node.getRelation()).getName().getSuffix(); + if (metaStore.getSource(structuredDataSourceName) + == null) { + throw new KsqlException(structuredDataSourceName + " does not exist."); + } + + StructuredDataSource structuredDataSource = metaStore.getSource(structuredDataSourceName); + + if (((Table) node.getRelation()).getProperties() != null) { + if (((Table) node.getRelation()).getProperties().get(DdlConfig.TIMESTAMP_NAME_PROPERTY) + != null) { + String timestampFieldName = ((Table) node.getRelation()).getProperties() + .get(DdlConfig.TIMESTAMP_NAME_PROPERTY).toString().toUpperCase(); + if (!(timestampFieldName.startsWith("'") && timestampFieldName.endsWith("'"))) { + throw new KsqlException("Property value should be a string enclosed in single quotes."); + } + timestampFieldName = timestampFieldName.substring(1, timestampFieldName.length() - 1); + structuredDataSource = structuredDataSource.cloneWithTimeField(timestampFieldName); + } + } + + Pair<StructuredDataSource, String> + fromDataSource = + new Pair<>( + structuredDataSource, + node.getAlias()); + analysis.getFromDataSources().add(fromDataSource); + return node; + } + + @Override + protected Node visitTable(final Table node, final AnalysisContext context) { + + StructuredDataSource into; + if (node.isStdOut) { + into = + new KsqlStdOut(KsqlStdOut.KSQL_STDOUT_NAME, null, null, + null, StructuredDataSource.DataSourceType.KSTREAM); + + } else if (context.getParentType() == AnalysisContext.ParentType.INTO) { + into = analyzeNonStdOutTable(node); + } else { + throw new KsqlException("INTO clause is not set correctly!"); + } + + analysis.setInto(into); + return null; + } + + + + @Override + protected Node visitCast(final Cast node, final AnalysisContext context) { + return process(node.getExpression(), context); + } + + @Override + protected Node visitSelect(final Select node, final AnalysisContext context) { + for (SelectItem selectItem : node.getSelectItems()) { + if (selectItem instanceof AllColumns) { + // expand * and T.* + AllColumns allColumns = (AllColumns) selectItem; + if ((this.analysis.getFromDataSources() == null) || (this.analysis.getFromDataSources() + .isEmpty())) { + throw new KsqlException("FROM clause was not resolved!"); + } + if (analysis.getJoin() != null) { + JoinNode joinNode = analysis.getJoin(); + for (Field field : joinNode.getLeft().getSchema().fields()) { + QualifiedNameReference + qualifiedNameReference = + new QualifiedNameReference(allColumns.getLocation().get(), QualifiedName + .of(joinNode.getLeftAlias() + "." + field.name())); + analysis.addSelectItem(qualifiedNameReference, + joinNode.getLeftAlias() + "_" + field.name()); + } + for (Field field : joinNode.getRight().getSchema().fields()) { + QualifiedNameReference qualifiedNameReference = + new QualifiedNameReference(allColumns.getLocation().get(), QualifiedName + .of(joinNode.getRightAlias() + "."
+ field.name())); + analysis.addSelectItem(qualifiedNameReference, + joinNode.getRightAlias() + "_" + field.name()); + } + } else { + for (Field field : this.analysis.getFromDataSources().get(0).getLeft().getSchema() + .fields()) { + QualifiedNameReference + qualifiedNameReference = + new QualifiedNameReference(allColumns.getLocation().get(), QualifiedName + .of(this.analysis.getFromDataSources().get(0).getRight() + "." + field.name())); + analysis.addSelectItem(qualifiedNameReference, field.name()); + } + } + } else if (selectItem instanceof SingleColumn) { + SingleColumn column = (SingleColumn) selectItem; + analysis.addSelectItem(column.getExpression(), column.getAlias().get()); + } else { + throw new IllegalArgumentException( + "Unsupported SelectItem type: " + selectItem.getClass().getName()); + } + } + return null; + } + + @Override + protected Node visitQualifiedNameReference(final QualifiedNameReference node, + final AnalysisContext context) { + return visitExpression(node, context); + } + + @Override + protected Node visitGroupBy(final GroupBy node, final AnalysisContext context) { + return null; + } + + private StructuredDataSource analyzeFrom(final QuerySpecification node, + final AnalysisContext context) { + return null; + } + + private void analyzeWhere(final Node node, final AnalysisContext context) { + analysis.setWhereExpression((Expression) node); + } + + private void analyzeGroupBy(final GroupBy groupBy, final AnalysisContext context) { + for (GroupingElement groupingElement : groupBy.getGroupingElements()) { + Set groupingSet = groupingElement.enumerateGroupingSets().get(0); + analysis.getGroupByExpressions().addAll(groupingSet); + } + } + + private void analyzeWindowExpression(final WindowExpression windowExpression, + final AnalysisContext context) { + analysis.setWindowExpression(windowExpression); + } + + private void analyzeHaving(final Node node, final AnalysisContext context) { + analysis.setHavingExpression((Expression) node); + } + + private StructuredDataSource analyzeNonStdOutTable(final Table node) { + StructuredDataSource into = new KsqlStream(node.getName().getSuffix(), null, + null, null, null); + + setIntoProperties(into, node); + return into; + } + + private void setIntoProperties(final StructuredDataSource into, final Table node) { + if (node.getProperties().get(DdlConfig.VALUE_FORMAT_PROPERTY) != null) { + setIntoTopicFormat(into, node); + } + + if (node.getProperties().get(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY) != null) { + setIntoTopicName(node); + } + + if (node.getProperties().get(DdlConfig.PARTITION_BY_PROPERTY) != null) { + String intoPartitionByColumnName = node.getProperties() + .get(DdlConfig.PARTITION_BY_PROPERTY).toString().toUpperCase(); + analysis.getIntoProperties().put(DdlConfig.PARTITION_BY_PROPERTY, + intoPartitionByColumnName); + } + + if (node.getProperties().get(KsqlConfig.SINK_TIMESTAMP_COLUMN_NAME) != null) { + setIntoTimestampColumn(node); + } + + if (node.getProperties().get(KsqlConfig.SINK_NUMBER_OF_PARTITIONS) != null) { + try { + int numberOfPartitions = Integer.parseInt(node.getProperties() + .get(KsqlConfig.SINK_NUMBER_OF_PARTITIONS) + .toString()); + analysis.getIntoProperties().put(KsqlConfig.SINK_NUMBER_OF_PARTITIONS, + numberOfPartitions); + + } catch (NumberFormatException e) { + throw new KsqlException("Invalid number of partitions in WITH clause: " + + node.getProperties().get(KsqlConfig.SINK_NUMBER_OF_PARTITIONS) + .toString()); + } + } + + if (node.getProperties().get(KsqlConfig.SINK_NUMBER_OF_REPLICATIONS) != null) 
{ + try { + short numberOfReplications = + Short.parseShort(node.getProperties().get(KsqlConfig.SINK_NUMBER_OF_REPLICATIONS) + .toString()); + analysis.getIntoProperties() + .put(KsqlConfig.SINK_NUMBER_OF_REPLICATIONS, numberOfReplications); + } catch (NumberFormatException e) { + throw new KsqlException("Invalid number of replications in WITH clause: " + node + .getProperties().get(KsqlConfig.SINK_NUMBER_OF_REPLICATIONS).toString()); + } + } + } + + private void setIntoTopicName(final Table node) { + String + intoKafkaTopicName = + node.getProperties().get(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY).toString(); + if (!(intoKafkaTopicName.startsWith("'") && intoKafkaTopicName.endsWith("'"))) { + throw new KsqlException( + intoKafkaTopicName + " value is a string and should be enclosed in single quotes."); + } + intoKafkaTopicName = intoKafkaTopicName.substring(1, intoKafkaTopicName.length() - 1); + analysis.setIntoKafkaTopicName(intoKafkaTopicName); + analysis.getIntoProperties().put(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY, intoKafkaTopicName); + } + + private void setIntoTopicFormat(final StructuredDataSource into, final Table node) { + String serde = node.getProperties().get(DdlConfig.VALUE_FORMAT_PROPERTY).toString(); + if (!(serde.startsWith("'") && serde.endsWith("'"))) { + throw new KsqlException( + serde + " value is a string and should be enclosed in single quotes."); + } + serde = serde.substring(1, serde.length() - 1); + analysis.setIntoFormat(serde); + analysis.getIntoProperties().put(DdlConfig.VALUE_FORMAT_PROPERTY, serde); + if ("AVRO".equals(serde)) { + String avroSchemaFilePath = "/tmp/" + into.getName() + ".avro"; + if (node.getProperties().get(DdlConfig.AVRO_SCHEMA_FILE) != null) { + avroSchemaFilePath = node.getProperties().get(DdlConfig.AVRO_SCHEMA_FILE).toString(); + if (!(avroSchemaFilePath.startsWith("'") && avroSchemaFilePath.endsWith("'"))) { + throw new KsqlException( + avroSchemaFilePath + " value is a string and should be enclosed in single quotes."); + } + avroSchemaFilePath = avroSchemaFilePath.substring(1, avroSchemaFilePath.length() - 1); + } + analysis.setIntoAvroSchemaFilePath(avroSchemaFilePath); + analysis.getIntoProperties().put(DdlConfig.AVRO_SCHEMA_FILE, avroSchemaFilePath); + } + } + + private void setIntoTimestampColumn(final Table node) { + String + intoTimestampColumnName = node.getProperties() + .get(KsqlConfig.SINK_TIMESTAMP_COLUMN_NAME).toString().toUpperCase(); + if (!(intoTimestampColumnName.startsWith("'") && intoTimestampColumnName.endsWith("'"))) { + throw new KsqlException( + intoTimestampColumnName + " value is a string and should be enclosed in single quotes."); + } + intoTimestampColumnName = intoTimestampColumnName.substring(1, + intoTimestampColumnName.length() - 1); + analysis.getIntoProperties().put(KsqlConfig.SINK_TIMESTAMP_COLUMN_NAME, + intoTimestampColumnName); + } +} \ No newline at end of file diff --git a/ksql-core/src/main/java/io/confluent/ksql/analyzer/ExpressionAnalyzer.java b/ksql-core/src/main/java/io/confluent/ksql/analyzer/ExpressionAnalyzer.java new file mode 100644 index 000000000000..a2ef6620dad3 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/analyzer/ExpressionAnalyzer.java @@ -0,0 +1,131 @@ +/** + * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.analyzer; + +import io.confluent.ksql.function.KsqlFunction; +import io.confluent.ksql.function.KsqlFunctions; +import io.confluent.ksql.parser.tree.ArithmeticBinaryExpression; +import io.confluent.ksql.parser.tree.AstVisitor; +import io.confluent.ksql.parser.tree.Cast; +import io.confluent.ksql.parser.tree.ComparisonExpression; +import io.confluent.ksql.parser.tree.DereferenceExpression; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.FunctionCall; +import io.confluent.ksql.parser.tree.IsNotNullPredicate; +import io.confluent.ksql.parser.tree.IsNullPredicate; +import io.confluent.ksql.parser.tree.LikePredicate; +import io.confluent.ksql.parser.tree.LogicalBinaryExpression; +import io.confluent.ksql.parser.tree.NotExpression; +import io.confluent.ksql.parser.tree.QualifiedNameReference; +import io.confluent.ksql.util.SchemaUtil; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +import java.util.Optional; + + +public class ExpressionAnalyzer { + final Schema schema; + final boolean isJoinSchema; + + public ExpressionAnalyzer(Schema schema, boolean isJoinSchema) { + this.schema = schema; + this.isJoinSchema = isJoinSchema; + } + + public void analyzeExpression(Expression expression) { + Visitor visitor = new Visitor(schema); + visitor.process(expression, null); + } + + private class Visitor + extends AstVisitor { + + final Schema schema; + + Visitor(Schema schema) { + this.schema = schema; + } + + protected Object visitLikePredicate(LikePredicate node, Object context) { + process(node.getValue(), null); + return null; + } + + protected Object visitFunctionCall(FunctionCall node, Object context) { + String functionName = node.getName().getSuffix(); + KsqlFunction ksqlFunction = KsqlFunctions.getFunction(functionName); + for (Expression argExpr : node.getArguments()) { + process(argExpr, null); + } + return null; + } + + protected Object visitArithmeticBinary(ArithmeticBinaryExpression node, Object context) { + process(node.getLeft(), null); + process(node.getRight(), null); + return null; + } + + protected Object visitIsNotNullPredicate(IsNotNullPredicate node, Object context) { + return process(node.getValue(), context); + } + + protected Object visitIsNullPredicate(IsNullPredicate node, Object context) { + return process(node.getValue(), context); + } + + protected Object visitLogicalBinaryExpression(LogicalBinaryExpression node, Object context) { + process(node.getLeft(), null); + process(node.getRight(), null); + return null; + } + + @Override + protected Object visitComparisonExpression(ComparisonExpression node, Object context) { + process(node.getLeft(), null); + process(node.getRight(), null); + return null; + } + + @Override + protected Object visitNotExpression(NotExpression node, Object context) { + return process(node.getValue(), null); + } + + @Override + protected Object visitDereferenceExpression(DereferenceExpression node, Object context) { + String columnName = node.getFieldName(); + if (isJoinSchema) { + columnName = node.toString(); + } + Optional schemaField = SchemaUtil.getFieldByName(schema, columnName); + if (!schemaField.isPresent()) { + throw new RuntimeException( + String.format("Column %s cannot be resolved.", columnName)); + } + return null; + } + + @Override + protected Object visitCast(Cast node, Object context) { + + process(node.getExpression(), context); + return null; + } + + @Override + protected Object 
visitQualifiedNameReference(QualifiedNameReference node, Object context) { + String columnName = node.getName().getSuffix(); + Optional schemaField = SchemaUtil.getFieldByName(schema, columnName); + if (!schemaField.isPresent()) { + throw new RuntimeException( + String.format("Column %s cannot be resolved.", columnName)); + } + return null; + } + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/DdlConfig.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/DdlConfig.java new file mode 100644 index 000000000000..e9cb9d1969e0 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/DdlConfig.java @@ -0,0 +1,21 @@ +/** + * Copyright 2017 Confluent Inc. + * + **/ + +package io.confluent.ksql.ddl; + +public class DdlConfig { + + public static final String VALUE_FORMAT_PROPERTY = "VALUE_FORMAT"; + public static final String AVRO_SCHEMA_FILE = "AVROSCHEMAFILE"; + public static final String AVRO_SCHEMA = "AVROSCHEMA"; + public static final String KAFKA_TOPIC_NAME_PROPERTY = "KAFKA_TOPIC"; + public static final String TOPIC_NAME_PROPERTY = "REGISTERED_TOPIC"; + public static final String STATE_STORE_NAME_PROPERTY = "STATESTORE"; + public static final String KEY_NAME_PROPERTY = "KEY"; + public static final String IS_WINDOWED_PROPERTY = "WINDOWED"; + public static final String TIMESTAMP_NAME_PROPERTY = "TIMESTAMP"; + public static final String PARTITION_BY_PROPERTY = "PARTITION_BY"; + public static final String SCHEMA_FILE_CONTENT_PROPERTY = "ksql.schema.file.content"; +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/AbstractCreateStreamCommand.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/AbstractCreateStreamCommand.java new file mode 100644 index 000000000000..f94371b0504a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/AbstractCreateStreamCommand.java @@ -0,0 +1,194 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.ddl.commands; + +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.tree.AbstractStreamCreateStatement; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.TableElement; +import io.confluent.ksql.util.KafkaTopicClient; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.KsqlPreconditions; +import io.confluent.ksql.util.SchemaUtil; +import io.confluent.ksql.util.StringUtil; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; + +import java.util.List; +import java.util.Map; + + +/** + * Base class of create table/stream command + */ +public abstract class AbstractCreateStreamCommand implements DDLCommand { + + String sourceName; + String topicName; + Schema schema; + String keyColumnName; + String timestampColumnName; + boolean isWindowed; + RegisterTopicCommand registerTopicCommand; + KafkaTopicClient kafkaTopicClient; + + public AbstractCreateStreamCommand(final AbstractStreamCreateStatement statement, + Map overriddenProperties, + KafkaTopicClient kafkaTopicClient) { + // TODO: get rid of toUpperCase in following code + Map properties = statement.getProperties(); + this.sourceName = statement.getName().getSuffix(); + this.topicName = this.sourceName; + this.kafkaTopicClient = kafkaTopicClient; + + if (properties.containsKey(DdlConfig.TOPIC_NAME_PROPERTY) && + !properties.containsKey(DdlConfig.VALUE_FORMAT_PROPERTY)) { + this.topicName = StringUtil.cleanQuotes( + properties.get(DdlConfig.TOPIC_NAME_PROPERTY).toString().toUpperCase()); + + checkTopicNameNotNull(properties); + } else { + this.registerTopicCommand = registerTopicFirst(properties, overriddenProperties); + } + + + this.schema = getStreamTableSchema(statement.getElements()); + + + this.keyColumnName = ""; + + if (properties.containsKey(DdlConfig.KEY_NAME_PROPERTY)) { + keyColumnName = properties.get(DdlConfig.KEY_NAME_PROPERTY) + .toString().toUpperCase(); + + keyColumnName = StringUtil.cleanQuotes(keyColumnName); + } + + this.timestampColumnName = ""; + if (properties.containsKey(DdlConfig.TIMESTAMP_NAME_PROPERTY)) { + timestampColumnName = properties.get(DdlConfig.TIMESTAMP_NAME_PROPERTY) + .toString().toUpperCase(); + timestampColumnName = StringUtil.cleanQuotes(timestampColumnName); + if (SchemaUtil.getFieldByName(schema, timestampColumnName) + .get().schema().type() != Schema + .Type.INT64) { + throw new KsqlException("Timestamp column, " + timestampColumnName + ", should be LONG" + + "(INT64)."); + } + } + + + this.isWindowed = false; + if (properties.containsKey(DdlConfig.IS_WINDOWED_PROPERTY)) { + String isWindowedProp = properties.get(DdlConfig.IS_WINDOWED_PROPERTY) + .toString().toUpperCase(); + try { + isWindowed = Boolean.parseBoolean(isWindowedProp); + } catch (Exception e) { + throw new KsqlException("isWindowed property is not set correctly: " + isWindowedProp); + } + } + } + + private void checkTopicNameNotNull(Map properties) { + // TODO: move the check to grammer + KsqlPreconditions.checkNotNull( + properties.get(DdlConfig.TOPIC_NAME_PROPERTY), + "Topic name should be set in WITH clause."); + } + + private SchemaBuilder getStreamTableSchema(List tableElementList) { + SchemaBuilder tableSchema = SchemaBuilder.struct(); + for (TableElement tableElement : tableElementList) { + if (tableElement.getName().equalsIgnoreCase(SchemaUtil.ROWTIME_NAME) || tableElement.getName() + 
.equalsIgnoreCase(SchemaUtil.ROWKEY_NAME)) { + throw new KsqlException(SchemaUtil.ROWTIME_NAME + "/" + SchemaUtil.ROWKEY_NAME + + " are reserved tokens for implicit columns." + + " You cannot use them as column names."); + } + tableSchema = tableSchema.field(tableElement.getName(), getKsqlType(tableElement.getType())); + } + + return tableSchema; + } + + // TODO: this needs to be moved to a proper place so it is accessible to everyone. Temporary! + private Schema getKsqlType(final String sqlType) { + switch (sqlType) { + case "VARCHAR": + case "STRING": + return Schema.STRING_SCHEMA; + case "BOOLEAN": + case "BOOL": + return Schema.BOOLEAN_SCHEMA; + case "INTEGER": + case "INT": + return Schema.INT32_SCHEMA; + case "BIGINT": + case "LONG": + return Schema.INT64_SCHEMA; + case "DOUBLE": + return Schema.FLOAT64_SCHEMA; + default: + return getKsqlComplexType(sqlType); + } + } + + private Schema getKsqlComplexType(final String sqlType) { + if (sqlType.startsWith("ARRAY")) { + return SchemaBuilder + .array(getKsqlType(sqlType.substring("ARRAY".length() + 1, sqlType.length() - 1))); + } else if (sqlType.startsWith("MAP")) { + // TODO: For now only primitive data types for map are supported. Will have to add + // nested types. + String[] mapTypesStrs = sqlType.substring("MAP".length() + 1, sqlType.length() - 1) + .trim().split(","); + if (mapTypesStrs.length != 2) { + throw new KsqlException("Map type is not defined correctly: " + sqlType); + } + String keyType = mapTypesStrs[0].trim(); + String valueType = mapTypesStrs[1].trim(); + return SchemaBuilder.map(getKsqlType(keyType), getKsqlType(valueType)); + } + throw new KsqlException("Unsupported type: " + sqlType); + } + + protected void checkMetaData(MetaStore metaStore, String sourceName, String topicName) { + // TODO: move the check to the runtime since it accesses metaStore + KsqlPreconditions.checkArgument( + metaStore.getSource(sourceName) == null, + "Source already exists."); + + KsqlPreconditions.checkNotNull( + metaStore.getTopic(topicName), + String.format("The corresponding topic, %s, does not exist.", topicName)); + } + + protected RegisterTopicCommand registerTopicFirst(Map<String, Expression> properties, + Map<String, Object> overriddenProperties) { + if (properties.isEmpty()) { + throw new KsqlException("Create Stream/Table statement needs WITH clause."); + } + if (!properties.containsKey(DdlConfig.VALUE_FORMAT_PROPERTY)) { + throw new KsqlException("Topic format (" + DdlConfig.VALUE_FORMAT_PROPERTY + + ") should be set in WITH clause."); + } + if (!properties.containsKey(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY)) { + throw new KsqlException("Corresponding Kafka topic (" + DdlConfig.KAFKA_TOPIC_NAME_PROPERTY + + ") should be set in WITH clause."); + } + String kafkaTopicName = StringUtil.cleanQuotes( + properties.get(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY).toString()); + if (!kafkaTopicClient.isTopicExists(kafkaTopicName)) { + throw new KsqlException("Kafka topic does not exist: " + kafkaTopicName); + } + return new RegisterTopicCommand(this.topicName, false, properties, overriddenProperties); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/CreateStreamCommand.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/CreateStreamCommand.java new file mode 100644 index 000000000000..6b8c8da0b389 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/CreateStreamCommand.java @@ -0,0 +1,39 @@ +/** + * Copyright 2017 Confluent Inc.
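+ * + * Executes a CREATE STREAM statement: registers the backing topic when needed, validates the metastore state, and adds the resulting KsqlStream.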
+ **/ + +package io.confluent.ksql.ddl.commands; + +import java.util.Map; + +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.tree.CreateStream; +import io.confluent.ksql.util.KafkaTopicClient; + + +public class CreateStreamCommand extends AbstractCreateStreamCommand { + public CreateStreamCommand(CreateStream createStream, Map overriddenProperties, + KafkaTopicClient kafkaTopicClient) { + super(createStream, overriddenProperties, kafkaTopicClient); + } + + @Override + public DDLCommandResult run(MetaStore metaStore) { + if (registerTopicCommand != null) { + registerTopicCommand.run(metaStore); + } + checkMetaData(metaStore, sourceName, topicName); + KsqlStream ksqlStream = new KsqlStream(sourceName, schema, + (keyColumnName.length() == 0) ? null : + schema.field(keyColumnName), + (timestampColumnName.length() == 0) ? null : + schema.field(timestampColumnName), + metaStore.getTopic(topicName)); + + // TODO: Need to check if the topic exists. + // Add the topic to the metastore + metaStore.putSource(ksqlStream.cloneWithTimeKeyColumns()); + return new DDLCommandResult(true, "Stream created"); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/CreateTableCommand.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/CreateTableCommand.java new file mode 100644 index 000000000000..59a1d2fa8cde --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/CreateTableCommand.java @@ -0,0 +1,57 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.ddl.commands; + +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.metastore.KsqlTable; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.tree.CreateTable; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.util.KafkaTopicClient; +import io.confluent.ksql.util.KsqlPreconditions; +import io.confluent.ksql.util.StringUtil; + +import java.util.Map; + +public class CreateTableCommand extends AbstractCreateStreamCommand { + + String stateStoreName; + + public CreateTableCommand(CreateTable createTable, Map overriddenProperties, + KafkaTopicClient kafkaTopicClient) { + super(createTable, overriddenProperties, kafkaTopicClient); + + Map properties = createTable.getProperties(); + + if (properties.containsKey(DdlConfig.STATE_STORE_NAME_PROPERTY)) { + this.stateStoreName = StringUtil.cleanQuotes(properties.get(DdlConfig.STATE_STORE_NAME_PROPERTY).toString()); + } else { + this.stateStoreName = createTable.getName().toString() + "_statestore"; + } + + + } + + @Override + public DDLCommandResult run(MetaStore metaStore) { + if (registerTopicCommand != null) { + registerTopicCommand.run(metaStore); + } + checkMetaData(metaStore, sourceName, topicName); + KsqlTable ksqlTable = new KsqlTable(sourceName, schema, + (keyColumnName.length() == 0) ? null : + schema.field(keyColumnName), + (timestampColumnName.length() == 0) ? null : + schema.field(timestampColumnName), + metaStore.getTopic(topicName), + stateStoreName, isWindowed); + + // TODO: Need to check if the topic exists. 
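+ // Illustrative statement (names assumed): CREATE TABLE users (userid varchar, registertime bigint) + // WITH (KAFKA_TOPIC='users', VALUE_FORMAT='JSON', STATESTORE='users_store', KEY='userid'); + // run() registers the backing topic if needed and then stores the KsqlTable built above.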
+ // Add the topic to the metastore + metaStore.putSource(ksqlTable.cloneWithTimeKeyColumns()); + return new DDLCommandResult(true, "Table created"); + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommand.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommand.java new file mode 100644 index 000000000000..4fda595410e2 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommand.java @@ -0,0 +1,13 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.ddl.commands; + +import io.confluent.ksql.metastore.MetaStore; + +public interface DDLCommand { + + DDLCommandResult run(MetaStore metaStore); + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommandExec.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommandExec.java new file mode 100644 index 000000000000..9917c8ad49f6 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommandExec.java @@ -0,0 +1,56 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.ddl.commands; + +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.exception.ExceptionUtil; +import io.confluent.ksql.util.KsqlException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Executes DDL commands. + */ +public class DDLCommandExec { + + private static final Logger LOGGER = LoggerFactory.getLogger(DDLCommandExec.class); + private final MetaStore metaStore; + + public DDLCommandExec(MetaStore metaStore) { + this.metaStore = metaStore; + } + + /** + * Executes the command against a temporary metastore. + * @param ddlCommand the command to run + * @param tempMetaStore the temporary (sandbox) metastore + * @return the result of running the command + */ + public DDLCommandResult tryExecute(DDLCommand ddlCommand, MetaStore tempMetaStore) { + if (tempMetaStore == metaStore) { + throw new KsqlException("tryExecute() expects a temporary MetaStore, but was given the real MetaStore."); + } + return executeOnMetaStore(ddlCommand, tempMetaStore); + } + + /** + * Executes the command against the real metastore. + * @param ddlCommand the command to run + * @return the result of running the command + */ + public DDLCommandResult execute(DDLCommand ddlCommand) { + return executeOnMetaStore(ddlCommand, this.metaStore); + } + + private static DDLCommandResult executeOnMetaStore(DDLCommand ddlCommand, MetaStore metaStore) { + // TODO: create new task to run + try { + return ddlCommand.run(metaStore); + } catch (Exception e) { + String stackTrace = ExceptionUtil.stackTraceToString(e); + LOGGER.error(stackTrace); + return new DDLCommandResult(false, stackTrace); + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommandResult.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommandResult.java new file mode 100644 index 000000000000..b99935f09796 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DDLCommandResult.java @@ -0,0 +1,28 @@ +/** + * Copyright 2017 Confluent Inc.
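+ * + * Holds the success flag and human-readable message produced by DDLCommand.run().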
+ **/ + +package io.confluent.ksql.ddl.commands; + +public class DDLCommandResult { + + private final boolean success; + private final String message; + + public DDLCommandResult(boolean success) { + this(success, ""); + } + + public DDLCommandResult(boolean success, String message) { + this.success = success; + this.message = message; + } + + public boolean isSuccess() { + return success; + } + + public String getMessage() { + return message; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DropSourceCommand.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DropSourceCommand.java new file mode 100644 index 000000000000..4940921fa84c --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DropSourceCommand.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.ddl.commands; + +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.tree.AbstractStreamDropStatement; +import io.confluent.ksql.util.KsqlException; + + +public class DropSourceCommand implements DDLCommand { + + private final String sourceName; + + public DropSourceCommand(AbstractStreamDropStatement statement) { + this.sourceName = statement.getName().getSuffix(); + } + + @Override + public DDLCommandResult run(MetaStore metaStore) { + StructuredDataSource dataSource = metaStore.getSource(sourceName); + if (dataSource == null) { + throw new KsqlException("Source " + sourceName + " does not exist."); + } + DropTopicCommand dropTopicCommand = new DropTopicCommand( + dataSource.getKsqlTopic().getTopicName()); + dropTopicCommand.run(metaStore); + metaStore.deleteSource(sourceName); + return new DDLCommandResult(true, "Source " + sourceName + " was dropped"); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DropTopicCommand.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DropTopicCommand.java new file mode 100644 index 000000000000..d70fad827169 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/DropTopicCommand.java @@ -0,0 +1,28 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.ddl.commands; + +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.tree.DropTopic; + + +public class DropTopicCommand implements DDLCommand { + + private final String topicName; + + public DropTopicCommand(DropTopic dropTopic) { + this.topicName = dropTopic.getTopicName().getSuffix(); + } + + public DropTopicCommand(String topicName) { + this.topicName = topicName; + } + + @Override + public DDLCommandResult run(MetaStore metaStore) { + metaStore.deleteTopic(topicName); + return new DDLCommandResult(true, "Topic " + topicName + " was dropped"); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/RegisterTopicCommand.java b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/RegisterTopicCommand.java new file mode 100644 index 000000000000..1e2b3e15a172 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/ddl/commands/RegisterTopicCommand.java @@ -0,0 +1,111 @@ +/** + * Copyright 2017 Confluent Inc. 
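+ * + * Registers a KSQL topic in the metastore, binding a registered name to a Kafka topic and a value format (AVRO, JSON, or DELIMITED).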
+ **/ + +package io.confluent.ksql.ddl.commands; + +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.metastore.DataSource; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.RegisterTopic; +import io.confluent.ksql.serde.KsqlTopicSerDe; +import io.confluent.ksql.serde.avro.KsqlAvroTopicSerDe; +import io.confluent.ksql.serde.delimited.KsqlDelimitedTopicSerDe; +import io.confluent.ksql.serde.json.KsqlJsonTopicSerDe; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.StringUtil; + +import java.util.Collections; +import java.util.Map; + +public class RegisterTopicCommand implements DDLCommand { + private final String topicName; + private final String kafkaTopicName; + private final KsqlTopicSerDe topicSerDe; + private final boolean notExists; + + public RegisterTopicCommand(RegisterTopic registerTopic) { + this(registerTopic, Collections.emptyMap()); + } + + public RegisterTopicCommand(RegisterTopic registerTopic, + Map overriddenProperties) { + // TODO: find a way to merge overriddenProperties + this(registerTopic.getName().getSuffix(), + registerTopic.isNotExists(), + registerTopic.getProperties(), + overriddenProperties + ); + } + + public RegisterTopicCommand(String topicName, boolean notExist, + Map properties, + Map overriddenProperties) { + this.topicName = topicName; + // TODO: find a way to merge overriddenProperties + enforceTopicProperties(properties); + this.kafkaTopicName = StringUtil.cleanQuotes( + properties.get(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY).toString()); + final String serde = StringUtil.cleanQuotes( + properties.get(DdlConfig.VALUE_FORMAT_PROPERTY).toString()); + this.topicSerDe = extractTopicSerDe(overriddenProperties, serde); + this.notExists = notExist; + } + + private KsqlTopicSerDe extractTopicSerDe(Map overriddenProperties, String serde) { + // TODO: Find a way to avoid calling toUpperCase() here; + // if the property can be an unquoted identifier, then capitalization will have already happened + switch (serde.toUpperCase()) { + case DataSource.AVRO_SERDE_NAME: + if (!overriddenProperties.containsKey(DdlConfig.AVRO_SCHEMA)) { + throw new KsqlException("Avro schema file path should be set for avro topics."); + } + String avroSchema = overriddenProperties.get(DdlConfig.AVRO_SCHEMA).toString(); + return new KsqlAvroTopicSerDe(avroSchema); + case DataSource.JSON_SERDE_NAME: + return new KsqlJsonTopicSerDe(null); + case DataSource.DELIMITED_SERDE_NAME: + return new KsqlDelimitedTopicSerDe(); + default: + throw new KsqlException("The specified topic serde is not supported."); + } + } + + private void enforceTopicProperties(final Map properties) { + if (properties.size() == 0) { + throw new KsqlException("Register topic statement needs WITH clause."); + } + + if (!properties.containsKey(DdlConfig.VALUE_FORMAT_PROPERTY)) { + throw new KsqlException("Topic format(format) should be set in WITH clause."); + } + + if (!properties.containsKey(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY)) { + throw new KsqlException("Corresponding kafka topic should be set in WITH clause."); + } + } + + @Override + public DDLCommandResult run(MetaStore metaStore) { + if (metaStore.getTopic(topicName) != null) { + // Check IF NOT EXIST is set, if set, do not create topic if one exists. 
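+ // With IF NOT EXISTS the statement succeeds as a no-op when the name is already registered; + // without it, registering an existing name fails below.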
+ if (notExists) { + return new DDLCommandResult(true, + "Topic is not registered because it is already registered."); + } else { + throw new KsqlException("Topic already registered."); + } + } + + KsqlTopic ksqlTopic = new KsqlTopic(topicName, kafkaTopicName, topicSerDe); + + // TODO: Need to check if the topic exists. + // Add the topic to the metastore + metaStore.putTopic(ksqlTopic); + + return new DDLCommandResult(true, "Topic registered"); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/exception/ExceptionUtil.java b/ksql-core/src/main/java/io/confluent/ksql/exception/ExceptionUtil.java new file mode 100644 index 000000000000..dd061fa329a8 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/exception/ExceptionUtil.java @@ -0,0 +1,27 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.exception; + +import java.io.PrintWriter; +import java.io.StringWriter; + +public class ExceptionUtil { + public static String stackTraceToString(Exception e) { + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + e.printStackTrace(pw); + return sw.toString(); + } + + public static String getCurrentStackTraceToString() { + StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); + StringBuilder sb = new StringBuilder(); + for (StackTraceElement element : stackTraceElements) { + sb.append(element.toString()); + sb.append("\n"); + } + return sb.toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaResponseGetFailedException.java b/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaResponseGetFailedException.java new file mode 100644 index 000000000000..08858f0db184 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaResponseGetFailedException.java @@ -0,0 +1,15 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.exception; + +public class KafkaResponseGetFailedException extends KafkaTopicClientException { + public KafkaResponseGetFailedException(String message) { + super(message); + } + + public KafkaResponseGetFailedException(String message, Throwable throwable) { + super(message, throwable); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaTopicClientException.java b/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaTopicClientException.java new file mode 100644 index 000000000000..a9418cb9866d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaTopicClientException.java @@ -0,0 +1,15 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.exception; + +public abstract class KafkaTopicClientException extends RuntimeException { + public KafkaTopicClientException(String message) { + super(message); + } + + public KafkaTopicClientException(String message, Throwable throwable) { + super(message, throwable); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaTopicException.java b/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaTopicException.java new file mode 100644 index 000000000000..807f0065645f --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/exception/KafkaTopicException.java @@ -0,0 +1,15 @@ +/** + * Copyright 2017 Confluent Inc.
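+ * + * Thrown when a Kafka topic is missing or in an unexpected state.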
+ **/ + +package io.confluent.ksql.exception; + +public class KafkaTopicException extends KafkaTopicClientException { + public KafkaTopicException(String message) { + super(message); + } + + public KafkaTopicException(String message, Throwable throwable) { + super(message, throwable); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/exception/ParseFailedException.java b/ksql-core/src/main/java/io/confluent/ksql/exception/ParseFailedException.java new file mode 100644 index 000000000000..3504c74524a2 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/exception/ParseFailedException.java @@ -0,0 +1,17 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.exception; + +import io.confluent.ksql.util.KsqlException; + +public class ParseFailedException extends KsqlException { + public ParseFailedException(String message) { + super(message); + } + + public ParseFailedException(String message, Throwable throwable) { + super(message, throwable); + } +} \ No newline at end of file diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/KsqlAggFunctionDeterminer.java b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlAggFunctionDeterminer.java new file mode 100644 index 000000000000..4f6632857dc1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlAggFunctionDeterminer.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.List; + + +public abstract class KsqlAggFunctionDeterminer { + + final String functionName; + final List aggregateFunctionList; + + public KsqlAggFunctionDeterminer(String functionName, + List aggregateFunctionList) { + this.functionName = functionName; + this.aggregateFunctionList = aggregateFunctionList; + } + + public abstract KsqlAggregateFunction getProperAggregateFunction(List argTypeList); + + public String getFunctionName() { + return functionName; + } + + public List getAggregateFunctionList() { + return aggregateFunctionList; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/KsqlAggregateFunction.java b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlAggregateFunction.java new file mode 100644 index 000000000000..a91672600aa7 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlAggregateFunction.java @@ -0,0 +1,68 @@ +/** + * Copyright 2017 Confluent Inc. 
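+ * + * Base class for KSQL aggregate functions (UDAFs): V is the input value type and A the aggregate type, e.g. CountKudaf aggregates arbitrary values into a Long.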
+ **/ + +package io.confluent.ksql.function; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.List; + +public abstract class KsqlAggregateFunction<V, A> { + + final int argIndexInValue; + public final A intialValue; + final Schema returnType; + final List<Schema> arguments; + final String functionName; + final Class kudafClass; + + public KsqlAggregateFunction(Integer argIndexInValue) { + this.argIndexInValue = argIndexInValue; + this.intialValue = null; + this.returnType = null; + this.arguments = null; + this.functionName = null; + this.kudafClass = null; + } + + public KsqlAggregateFunction(int argIndexInValue, + A intialValue, Schema returnType, List<Schema> arguments, + String functionName, Class kudafClass) { + this.argIndexInValue = argIndexInValue; + this.intialValue = intialValue; + this.returnType = returnType; + this.arguments = arguments; + this.functionName = functionName; + this.kudafClass = kudafClass; + } + + public abstract A aggregate(V currentVal, A currentAggVal); + + public A getIntialValue() { + return intialValue; + } + + public int getArgIndexInValue() { + return argIndexInValue; + } + + public Schema getReturnType() { + return returnType; + } + + public List<Schema> getArguments() { + return arguments; + } + + public String getFunctionName() { + return functionName; + } + + public Class getKudafClass() { + return kudafClass; + } + + public abstract Merger<String, A> getMerger(); +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunction.java b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunction.java new file mode 100644 index 000000000000..2f40e883954d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunction.java @@ -0,0 +1,42 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function; + +import org.apache.kafka.connect.data.Schema; + +import java.util.List; + +public class KsqlFunction { + + final Schema returnType; + final List<Schema> arguments; + final String functionName; + final Class kudfClass; + + public KsqlFunction(Schema returnType, List<Schema> arguments, String functionName, + Class kudfClass) { + this.returnType = returnType; + this.arguments = arguments; + this.functionName = functionName; + this.kudfClass = kudfClass; + } + + public Schema getReturnType() { + return returnType; + } + + public List<Schema> getArguments() { + return arguments; + } + + public String getFunctionName() { + return functionName; + } + + public Class getKudfClass() { + return kudfClass; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunctionException.java b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunctionException.java new file mode 100644 index 000000000000..95d55415015a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunctionException.java @@ -0,0 +1,18 @@ +/** + * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.function; + +import io.confluent.ksql.util.KsqlException; + +public class KsqlFunctionException extends KsqlException { + + public KsqlFunctionException(String message) { + super(message); + } + + public KsqlFunctionException(String s, Throwable throwable) { + super(s, throwable); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunctions.java b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunctions.java new file mode 100644 index 000000000000..98db5da5db37 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/KsqlFunctions.java @@ -0,0 +1,186 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function; + +import io.confluent.ksql.function.udaf.count.CountAggFunctionDeterminer; +import io.confluent.ksql.function.udaf.max.MaxAggFunctionDeterminer; +import io.confluent.ksql.function.udaf.min.MinAggFunctionDeterminer; +import io.confluent.ksql.function.udaf.sum.SumAggFunctionDeterminer; +import io.confluent.ksql.function.udf.datetime.StringToTimestamp; +import io.confluent.ksql.function.udf.datetime.TimestampToString; +import io.confluent.ksql.function.udf.json.JsonExtractStringKudf; +import io.confluent.ksql.function.udf.math.AbsKudf; +import io.confluent.ksql.function.udf.math.CeilKudf; +import io.confluent.ksql.function.udf.math.FloorKudf; +import io.confluent.ksql.function.udf.math.RandomKudf; +import io.confluent.ksql.function.udf.math.RoundKudf; +import io.confluent.ksql.function.udf.string.ConcatKudf; +import io.confluent.ksql.function.udf.string.IfNullKudf; +import io.confluent.ksql.function.udf.string.LCaseKudf; +import io.confluent.ksql.function.udf.string.LenKudf; +import io.confluent.ksql.function.udf.string.SubstringKudf; +import io.confluent.ksql.function.udf.string.TrimKudf; +import io.confluent.ksql.function.udf.string.UCaseKudf; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.util.ExpressionTypeManager; +import io.confluent.ksql.util.KsqlException; +import org.apache.kafka.connect.data.Schema; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class KsqlFunctions { + + public static Map ksqlFunctionMap = new HashMap<>(); + public static Map ksqlAggregateFunctionMap = new HashMap<>(); + + static { + + /*************************************** + * String functions * + ****************************************/ + + KsqlFunction lcase = new KsqlFunction(Schema.STRING_SCHEMA, Arrays.asList(Schema.STRING_SCHEMA), + "LCASE", LCaseKudf.class); + addFunction(lcase); + + KsqlFunction ucase = new KsqlFunction(Schema.STRING_SCHEMA, Arrays.asList(Schema.STRING_SCHEMA), + "UCASE", UCaseKudf.class); + addFunction(ucase); + + KsqlFunction substring = new KsqlFunction(Schema.STRING_SCHEMA, Arrays.asList(Schema + .STRING_SCHEMA, + Schema + .INT32_SCHEMA, + Schema + .INT32_SCHEMA), + "SUBSTRING", SubstringKudf + .class); + addFunction(substring); + + KsqlFunction concat = new KsqlFunction(Schema.STRING_SCHEMA, Arrays.asList(Schema.STRING_SCHEMA, + Schema.STRING_SCHEMA), + "CONCAT", ConcatKudf.class); + addFunction(concat); + + KsqlFunction trim = new KsqlFunction(Schema.STRING_SCHEMA, Arrays.asList(Schema.STRING_SCHEMA), + "TRIM", TrimKudf.class); + addFunction(trim); + + KsqlFunction ifNull = new KsqlFunction(Schema.STRING_SCHEMA, Arrays.asList(Schema.STRING_SCHEMA, + Schema.STRING_SCHEMA), + "IFNULL", IfNullKudf.class); + addFunction(ifNull); + + KsqlFunction len = 
new KsqlFunction(Schema.INT32_SCHEMA, Arrays.asList(Schema.STRING_SCHEMA), + "LEN", LenKudf.class); + addFunction(len); + + /*************************************** + * Math functions * + ***************************************/ + + KsqlFunction abs = new KsqlFunction(Schema.FLOAT64_SCHEMA, Arrays.asList(Schema.FLOAT64_SCHEMA), + "ABS", AbsKudf.class); + addFunction(abs); + + KsqlFunction ceil = new KsqlFunction(Schema.FLOAT64_SCHEMA, + Arrays.asList(Schema.FLOAT64_SCHEMA), + "CEIL", CeilKudf.class); + addFunction(ceil); + + KsqlFunction floor = new KsqlFunction(Schema.FLOAT64_SCHEMA, + Arrays.asList(Schema.FLOAT64_SCHEMA), + "FLOOR", FloorKudf.class); + addFunction(floor); + + KsqlFunction + round = + new KsqlFunction(Schema.INT64_SCHEMA, Arrays.asList(Schema.FLOAT64_SCHEMA), + "ROUND", RoundKudf.class); + addFunction(round); + + KsqlFunction random = new KsqlFunction(Schema.FLOAT64_SCHEMA, new ArrayList<>(), + "RANDOM", RandomKudf.class); + addFunction(random); + + + /*************************************** + * Date/Time functions * + ***************************************/ + KsqlFunction timestampToString = new KsqlFunction(Schema.STRING_SCHEMA, + Arrays.asList(Schema.INT64_SCHEMA, + Schema.STRING_SCHEMA), + "TIMESTAMPTOSTRING", TimestampToString.class); + addFunction(timestampToString); + + KsqlFunction stringToTimestamp = new KsqlFunction(Schema.INT64_SCHEMA, + Arrays.asList(Schema.STRING_SCHEMA, + Schema.STRING_SCHEMA), + "STRINGTOTIMESTAMP", + StringToTimestamp.class); + addFunction(stringToTimestamp); + + /*************************************** + * JSON functions * + ****************************************/ + + KsqlFunction getStringFromJson = new KsqlFunction( + Schema.STRING_SCHEMA, Arrays.asList(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA), + "EXTRACTJSONFIELD", JsonExtractStringKudf.class); + addFunction(getStringFromJson); + + + /*************************************** + * UDAFs * + ***************************************/ + + addAggregateFunctionDeterminer(new CountAggFunctionDeterminer()); + addAggregateFunctionDeterminer(new SumAggFunctionDeterminer()); + + addAggregateFunctionDeterminer(new MaxAggFunctionDeterminer()); + addAggregateFunctionDeterminer(new MinAggFunctionDeterminer()); + + } + + public static KsqlFunction getFunction(String functionName) { + return ksqlFunctionMap.get(functionName); + } + + public static void addFunction(KsqlFunction ksqlFunction) { + ksqlFunctionMap.put(ksqlFunction.getFunctionName().toUpperCase(), ksqlFunction); + } + + public static boolean isAnAggregateFunction(String functionName) { + if (ksqlAggregateFunctionMap.get(functionName) != null) { + return true; + } + return false; + } + + public static KsqlAggregateFunction getAggregateFunction(String functionName, List + functionArgs, Schema schema) { + KsqlAggFunctionDeterminer ksqlAggFunctionDeterminer = ksqlAggregateFunctionMap + .get(functionName); + if (ksqlAggFunctionDeterminer == null) { + throw new KsqlException("No aggregate function with name " + functionName + " exists!"); + } + ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(schema); + Schema expressionType = expressionTypeManager.getExpressionType(functionArgs.get(0)); + KsqlAggregateFunction aggregateFunction = + ksqlAggFunctionDeterminer.getProperAggregateFunction(Arrays.asList(expressionType)); + return aggregateFunction; + } + + public static void addAggregateFunctionDeterminer(KsqlAggFunctionDeterminer + ksqlAggFunctionDeterminer) { + 
ksqlAggregateFunctionMap.put(ksqlAggFunctionDeterminer.functionName, ksqlAggFunctionDeterminer); + } + + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/KudafAggregator.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/KudafAggregator.java new file mode 100644 index 000000000000..fc6efff5411b --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/KudafAggregator.java @@ -0,0 +1,83 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf; + +import io.confluent.ksql.function.KsqlAggregateFunction; +import io.confluent.ksql.physical.GenericRow; +import org.apache.kafka.streams.kstream.Aggregator; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +public class KudafAggregator implements Aggregator { + + Map aggValToAggFunctionMap; + Map aggValToValColumnMap; + + public KudafAggregator(Map aggValToAggFunctionMap, Map aggValToValColumnMap) { + this.aggValToAggFunctionMap = aggValToAggFunctionMap; + this.aggValToValColumnMap = aggValToValColumnMap; + } + + @Override + public GenericRow apply(String s, GenericRow rowValue, GenericRow aggRowValue) { + + for (int aggValColIndex: aggValToValColumnMap.keySet()) { + aggRowValue.getColumns().set(aggValColIndex, rowValue.getColumns() + .get(aggValToValColumnMap.get(aggValColIndex))); + } + + for (int aggFunctionIndex: aggValToAggFunctionMap.keySet()) { + KsqlAggregateFunction ksqlAggregateFunction = aggValToAggFunctionMap.get(aggFunctionIndex); + aggRowValue.getColumns().set(aggFunctionIndex, ksqlAggregateFunction.aggregate( + rowValue.getColumns().get(ksqlAggregateFunction.getArgIndexInValue()), + aggRowValue.getColumns().get(aggFunctionIndex)) + ); + } + return aggRowValue; + } + + public Merger getMerger() { + return new Merger() { + @Override + public GenericRow apply(String key, GenericRow aggRowOne, GenericRow aggRowTwo) { + + List columns = Stream.generate(String::new).limit(aggRowOne.getColumns().size()) + .collect(Collectors.toList()); + GenericRow mergedRow = new GenericRow(columns); + + for (int aggValColIndex: aggValToValColumnMap.keySet()) { + if (aggRowOne.getColumns() + .get(aggValToValColumnMap.get(aggValColIndex)).toString().length() > 0) { + mergedRow.getColumns().set(aggValColIndex, aggRowOne.getColumns() + .get(aggValToValColumnMap.get(aggValColIndex))); + } else { + mergedRow.getColumns().set(aggValColIndex, aggRowTwo.getColumns() + .get(aggValToValColumnMap.get(aggValColIndex))); + } + + } + + for (int aggFunctionIndex: aggValToAggFunctionMap.keySet()) { + KsqlAggregateFunction ksqlAggregateFunction = aggValToAggFunctionMap + .get(aggFunctionIndex); + mergedRow.getColumns().set(aggFunctionIndex, ksqlAggregateFunction.getMerger() + .apply(key, + aggRowOne.getColumns().get(aggFunctionIndex), + aggRowTwo.getColumns().get(aggFunctionIndex)) + ); + } + return mergedRow; + } + }; + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/KudafInitializer.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/KudafInitializer.java new file mode 100644 index 000000000000..84a59995e428 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/KudafInitializer.java @@ -0,0 +1,30 @@ +/** + * Copyright 2017 Confluent Inc. 
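+ * + * Supplies the initial aggregate row for an aggregation by copying the configured initial column values into a fresh GenericRow.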
+ **/ + +package io.confluent.ksql.function.udaf; + +import io.confluent.ksql.physical.GenericRow; +import org.apache.kafka.streams.kstream.Initializer; + +import java.util.ArrayList; +import java.util.List; + +public class KudafInitializer implements Initializer { + + final List initialGenericRowColumns; + + public KudafInitializer(List initialGenericRowColumns) { + this.initialGenericRowColumns = initialGenericRowColumns; + } + + @Override + public GenericRow apply() { + List rowColumns = new ArrayList(); + for (Object obj: initialGenericRowColumns) { + rowColumns.add(obj); + } + return new GenericRow(rowColumns); + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/count/CountAggFunctionDeterminer.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/count/CountAggFunctionDeterminer.java new file mode 100644 index 000000000000..e03a5caf5db0 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/count/CountAggFunctionDeterminer.java @@ -0,0 +1,24 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf.count; + +import io.confluent.ksql.function.KsqlAggFunctionDeterminer; +import io.confluent.ksql.function.KsqlAggregateFunction; +import org.apache.kafka.connect.data.Schema; + +import java.util.Arrays; +import java.util.List; + +public class CountAggFunctionDeterminer extends KsqlAggFunctionDeterminer { + + public CountAggFunctionDeterminer() { + super("COUNT", Arrays.asList(new CountKudaf(-1))); + } + + @Override + public KsqlAggregateFunction getProperAggregateFunction(List argTypeList) { + return getAggregateFunctionList().get(0); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/count/CountKudaf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/count/CountKudaf.java new file mode 100644 index 000000000000..079d7c4fb9f9 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/count/CountKudaf.java @@ -0,0 +1,34 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf.count; + +import io.confluent.ksql.function.KsqlAggregateFunction; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.Arrays; + +public class CountKudaf extends KsqlAggregateFunction { + + public CountKudaf(Integer argIndexInValue) { + super(argIndexInValue, 0L, Schema.INT64_SCHEMA, Arrays.asList(Schema.FLOAT64_SCHEMA), + "COUNT", CountKudaf.class); + } + + @Override + public Long aggregate(Object currentVal, Long currentAggVal) { + return currentAggVal + 1; + } + + @Override + public Merger getMerger() { + return new Merger() { + @Override + public Long apply(final String aggKey, final Long aggOne, final Long aggTwo) { + return aggOne + aggTwo; + } + }; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/DoubleMaxKudaf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/DoubleMaxKudaf.java new file mode 100644 index 000000000000..c043d12d7ee9 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/DoubleMaxKudaf.java @@ -0,0 +1,42 @@ +/** + * Copyright 2017 Confluent Inc. 
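+ * + * MAX aggregate over FLOAT64 (Double) values.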
+ **/ + +package io.confluent.ksql.function.udaf.max; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.Arrays; + +import io.confluent.ksql.function.KsqlAggregateFunction; + +public class DoubleMaxKudaf extends KsqlAggregateFunction<Double, Double> { + + public DoubleMaxKudaf(Integer argIndexInValue) { + // -Double.MAX_VALUE is the identity for max; Double.MIN_VALUE is the smallest + // positive double and would be wrong for all-negative inputs. + super(argIndexInValue, -Double.MAX_VALUE, Schema.FLOAT64_SCHEMA, + Arrays.asList(Schema.FLOAT64_SCHEMA), + "MAX", DoubleMaxKudaf.class); + } + + @Override + public Double aggregate(Double currentVal, Double currentAggVal) { + if (currentVal > currentAggVal) { + return currentVal; + } + return currentAggVal; + } + + @Override + public Merger<String, Double> getMerger() { + return new Merger<String, Double>() { + @Override + public Double apply(final String aggKey, final Double aggOne, final Double aggTwo) { + if (aggOne > aggTwo) { + return aggOne; + } + return aggTwo; + } + }; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/LongMaxKudaf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/LongMaxKudaf.java new file mode 100644 index 000000000000..916b304e65a1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/LongMaxKudaf.java @@ -0,0 +1,42 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf.max; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.Arrays; + +import io.confluent.ksql.function.KsqlAggregateFunction; + +public class LongMaxKudaf extends KsqlAggregateFunction<Long, Long> { + + public LongMaxKudaf(Integer argIndexInValue) { + super(argIndexInValue, Long.MIN_VALUE, Schema.INT64_SCHEMA, + Arrays.asList(Schema.INT64_SCHEMA), + "MAX", LongMaxKudaf.class); + } + + @Override + public Long aggregate(Long currentVal, Long currentAggVal) { + if (currentVal > currentAggVal) { + return currentVal; + } + return currentAggVal; + } + + @Override + public Merger<String, Long> getMerger() { + return new Merger<String, Long>() { + @Override + public Long apply(final String aggKey, final Long aggOne, final Long aggTwo) { + if (aggOne > aggTwo) { + return aggOne; + } + return aggTwo; + } + }; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/MaxAggFunctionDeterminer.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/MaxAggFunctionDeterminer.java new file mode 100644 index 000000000000..2cb9928c773a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/max/MaxAggFunctionDeterminer.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf.max; + +import org.apache.kafka.connect.data.Schema; + +import java.util.Arrays; +import java.util.List; + +import io.confluent.ksql.function.KsqlAggFunctionDeterminer; +import io.confluent.ksql.function.KsqlAggregateFunction; +import io.confluent.ksql.util.KsqlException; + +public class MaxAggFunctionDeterminer extends KsqlAggFunctionDeterminer { + + public MaxAggFunctionDeterminer() { + super("MAX", Arrays.asList(new DoubleMaxKudaf(-1), new LongMaxKudaf(-1))); + } + + @Override + public KsqlAggregateFunction getProperAggregateFunction(List<Schema> argTypeList) { + // For now we only support aggregate functions with one arg.
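+ // Selection is by argument type: the first registered Kudaf whose declared argument schema + // matches the query's argument wins, e.g. FLOAT64 -> DoubleMaxKudaf, INT64 -> LongMaxKudaf.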
+ for (KsqlAggregateFunction ksqlAggregateFunction : getAggregateFunctionList()) { + if (ksqlAggregateFunction.getArguments().get(0) == argTypeList.get(0)) { + return ksqlAggregateFunction; + } + } + throw new KsqlException("No Max aggregate function with " + argTypeList.get(0) + " " + + " argument type exists!"); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/DoubleMinKudaf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/DoubleMinKudaf.java new file mode 100644 index 000000000000..45e79a39244a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/DoubleMinKudaf.java @@ -0,0 +1,42 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf.min; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.Arrays; + +import io.confluent.ksql.function.KsqlAggregateFunction; + +public class DoubleMinKudaf extends KsqlAggregateFunction { + + public DoubleMinKudaf(Integer argIndexInValue) { + super(argIndexInValue, Double.MAX_VALUE, Schema.FLOAT64_SCHEMA, + Arrays.asList(Schema.FLOAT64_SCHEMA), + "MIN", DoubleMinKudaf.class); + } + + @Override + public Double aggregate(Double currentVal, Double currentAggVal) { + if (currentVal < currentAggVal) { + return currentVal; + } + return currentAggVal; + } + + @Override + public Merger getMerger() { + return new Merger() { + @Override + public Double apply(final String aggKey, final Double aggOne, final Double aggTwo) { + if (aggOne < aggTwo) { + return aggOne; + } + return aggTwo; + } + }; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/LongMinKudaf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/LongMinKudaf.java new file mode 100644 index 000000000000..bf1fbcfcdf4d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/LongMinKudaf.java @@ -0,0 +1,42 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf.min; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.Arrays; + +import io.confluent.ksql.function.KsqlAggregateFunction; + +public class LongMinKudaf extends KsqlAggregateFunction { + + public LongMinKudaf(Integer argIndexInValue) { + super(argIndexInValue, Long.MAX_VALUE, Schema.INT64_SCHEMA, + Arrays.asList(Schema.INT64_SCHEMA), + "MIN", LongMinKudaf.class); + } + + @Override + public Long aggregate(Long currentVal, Long currentAggVal) { + if (currentVal < currentAggVal) { + return currentVal; + } + return currentAggVal; + } + + @Override + public Merger getMerger() { + return new Merger() { + @Override + public Long apply(final String aggKey, final Long aggOne, final Long aggTwo) { + if (aggOne < aggTwo) { + return aggOne; + } + return aggTwo; + } + }; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/MinAggFunctionDeterminer.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/MinAggFunctionDeterminer.java new file mode 100644 index 000000000000..2cb0f4d087db --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/min/MinAggFunctionDeterminer.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.function.udaf.min; + +import org.apache.kafka.connect.data.Schema; + +import java.util.Arrays; +import java.util.List; + +import io.confluent.ksql.function.KsqlAggFunctionDeterminer; +import io.confluent.ksql.function.KsqlAggregateFunction; +import io.confluent.ksql.util.KsqlException; + +public class MinAggFunctionDeterminer extends KsqlAggFunctionDeterminer { + + public MinAggFunctionDeterminer() { + super("MIN", Arrays.asList(new DoubleMinKudaf(-1), new LongMinKudaf(-1))); + } + + @Override + public KsqlAggregateFunction getProperAggregateFunction(List<Schema> argTypeList) { + // For now we only support aggregate functions with one arg. + for (KsqlAggregateFunction ksqlAggregateFunction : getAggregateFunctionList()) { + if (ksqlAggregateFunction.getArguments().get(0) == argTypeList.get(0)) { + return ksqlAggregateFunction; + } + } + throw new KsqlException("No Min aggregate function with " + argTypeList.get(0) + + " argument type exists!"); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/DoubleSumKudaf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/DoubleSumKudaf.java new file mode 100644 index 000000000000..f6d561d82caa --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/DoubleSumKudaf.java @@ -0,0 +1,37 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf.sum; + +import io.confluent.ksql.function.KsqlAggregateFunction; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.Arrays; + +public class DoubleSumKudaf extends KsqlAggregateFunction<Double, Double> { + + public DoubleSumKudaf(Integer argIndexInValue) { + super(argIndexInValue, 0.0, Schema.FLOAT64_SCHEMA, + Arrays.asList(Schema.FLOAT64_SCHEMA), + "SUM", DoubleSumKudaf.class); + } + + @Override + public Double aggregate(Double currentVal, Double currentAggVal) { + return currentVal + currentAggVal; + } + + @Override + public Merger<String, Double> getMerger() { + return new Merger<String, Double>() { + @Override + public Double apply(final String aggKey, final Double aggOne, final Double aggTwo) { + return aggOne + aggTwo; + } + }; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/LongSumKudaf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/LongSumKudaf.java new file mode 100644 index 000000000000..5d4a1b46e923 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/LongSumKudaf.java @@ -0,0 +1,34 @@ +/** + * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.function.udaf.sum; + +import io.confluent.ksql.function.KsqlAggregateFunction; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.Arrays; + +public class LongSumKudaf extends KsqlAggregateFunction { + + public LongSumKudaf(Integer argIndexInValue) { + super(argIndexInValue, 0L, Schema.INT64_SCHEMA, + Arrays.asList(Schema.INT64_SCHEMA), "SUM", LongSumKudaf.class); + } + + @Override + public Long aggregate(Long currentVal, Long currentAggVal) { + return currentVal + currentAggVal; + } + + @Override + public Merger getMerger() { + return new Merger() { + @Override + public Long apply(final String aggKey, final Long aggOne, final Long aggTwo) { + return aggOne + aggTwo; + } + }; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/SumAggFunctionDeterminer.java b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/SumAggFunctionDeterminer.java new file mode 100644 index 000000000000..40d8c18d2e48 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udaf/sum/SumAggFunctionDeterminer.java @@ -0,0 +1,34 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udaf.sum; + +import io.confluent.ksql.function.KsqlAggFunctionDeterminer; +import io.confluent.ksql.function.KsqlAggregateFunction; +import io.confluent.ksql.util.KsqlException; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Merger; + +import java.util.Arrays; +import java.util.List; + +public class SumAggFunctionDeterminer extends KsqlAggFunctionDeterminer { + + public SumAggFunctionDeterminer() { + super("SUM", Arrays.asList(new DoubleSumKudaf(-1), new LongSumKudaf(-1))); + } + + @Override + public KsqlAggregateFunction getProperAggregateFunction(List argTypeList) { + // For now we only support aggregate functions with one arg. + for (KsqlAggregateFunction ksqlAggregateFunction : getAggregateFunctionList()) { + if (ksqlAggregateFunction.getArguments().get(0) == argTypeList.get(0)) { + return ksqlAggregateFunction; + } + } + throw new KsqlException("No SUM aggregate function with " + argTypeList.get(0) + " " + + " argument type exists!"); + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/Kudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/Kudf.java new file mode 100644 index 000000000000..ffebe2f7317d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/Kudf.java @@ -0,0 +1,12 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf; + +public interface Kudf { + + public void init(); + + public Object evaluate(Object... args); +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/datetime/StringToTimestamp.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/datetime/StringToTimestamp.java new file mode 100644 index 000000000000..f8a3a2c346ff --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/datetime/StringToTimestamp.java @@ -0,0 +1,38 @@ +/** + * Copyright 2017 Confluent Inc. 
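+ * + * Parses a date/time string into epoch milliseconds using a SimpleDateFormat pattern, e.g. STRINGTOTIMESTAMP(col, 'yyyy-MM-dd HH:mm:ss').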
+ **/ + +package io.confluent.ksql.function.udf.datetime; + +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class StringToTimestamp implements Kudf { + + DateFormat dateFormat = null; + + @Override + public void init() { + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 2) { + throw new KsqlFunctionException("StringToTimestamp udf should have two input arguments:" + + " date value and format."); + } + try { + if (dateFormat == null) { + dateFormat = new SimpleDateFormat(args[1].toString()); + } + return dateFormat.parse(args[0].toString()).getTime(); + } catch (ParseException e) { + throw new KsqlFunctionException("Exception running StringToTimestamp(" + args[0] + ", " + + args[1] + "): " + e.getMessage(), e); + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/datetime/TimestampToString.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/datetime/TimestampToString.java new file mode 100644 index 000000000000..6240b0e5a4ea --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/datetime/TimestampToString.java @@ -0,0 +1,38 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.datetime; + +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Date; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class TimestampToString implements Kudf { + + DateFormat dateFormat = null; + + @Override + public void init() { + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 2) { + throw new KsqlFunctionException("TimestampToString udf should have two input arguments:" + + " date value and format."); + } + try { + if (dateFormat == null) { + dateFormat = new SimpleDateFormat(args[1].toString()); + } + return dateFormat.format(new Date((long) args[0])); + } catch (Exception e) { + throw new KsqlFunctionException("Exception running TimestampToString(" + args[0] + ", " + + args[1] + "): " + e.getMessage(), e); + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/json/JsonExtractStringKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/json/JsonExtractStringKudf.java new file mode 100644 index 000000000000..f0fa6622f456 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/json/JsonExtractStringKudf.java @@ -0,0 +1,61 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.json; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.json.JsonPathTokenizer; + +import java.io.IOException; + +public class JsonExtractStringKudf implements Kudf { + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); + + String path = null; + JsonPathTokenizer jsonPathTokenizer = null; + ImmutableList<String> tokens = null; + + @Override + public void init() { + } + + @Override + public Object evaluate(Object...
args) { + if (args.length != 2) { + throw new KsqlFunctionException("EXTRACTJSONFIELD udf should have two input arguments."); + } + String jsonString = args[0].toString(); + if (path == null) { + path = args[1].toString(); + jsonPathTokenizer = new JsonPathTokenizer(path); + tokens = ImmutableList.copyOf(jsonPathTokenizer); + } + JsonNode jsonNode = null; + try { + jsonNode = OBJECT_MAPPER.readTree(jsonString); + } catch (IOException e) { + throw new KsqlException("Invalid JSON format.", e); + } + JsonNode currentNode = jsonNode; + for (String token: tokens) { + if (currentNode == null) { + return null; + } + currentNode = currentNode.get(token); + } + if (currentNode == null) { + return null; + } + if (currentNode.isTextual()) { + return currentNode.asText(); + } else { + return currentNode.toString(); + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/AbsKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/AbsKudf.java new file mode 100644 index 000000000000..966e0c31c8b4 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/AbsKudf.java @@ -0,0 +1,23 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.math; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class AbsKudf implements Kudf { + + @Override + public void init() { + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 1) { + throw new KsqlFunctionException("Abs udf should have one input argument."); + } + return Math.abs((Double) args[0]); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/CeilKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/CeilKudf.java new file mode 100644 index 000000000000..0228f6ab1203 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/CeilKudf.java @@ -0,0 +1,23 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.math; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class CeilKudf implements Kudf { + + @Override + public void init() { + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 1) { + throw new KsqlFunctionException("Ceil udf should have one input argument."); + } + return Math.ceil((Double) args[0]); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/FloorKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/FloorKudf.java new file mode 100644 index 000000000000..736ecac3ba50 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/FloorKudf.java @@ -0,0 +1,23 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.math; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class FloorKudf implements Kudf { + + @Override + public void init() { + } + + @Override + public Object evaluate(Object...
args) { + if (args.length != 1) { + throw new KsqlFunctionException("Floor udf should have one input argument."); + } + return Math.floor((Double) args[0]); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/RandomKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/RandomKudf.java new file mode 100644 index 000000000000..372deecad9a3 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/RandomKudf.java @@ -0,0 +1,24 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.math; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class RandomKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 0) { + throw new KsqlFunctionException("Random udf should have no input argument."); + } + return Math.random(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/RoundKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/RoundKudf.java new file mode 100644 index 000000000000..491ae1337710 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/math/RoundKudf.java @@ -0,0 +1,23 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.math; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class RoundKudf implements Kudf { + + @Override + public void init() { + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 1) { + throw new KsqlFunctionException("Round udf should have one input argument."); + } + return Math.round((Double) args[0]); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/ConcatKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/ConcatKudf.java new file mode 100644 index 000000000000..1466e4920a37 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/ConcatKudf.java @@ -0,0 +1,25 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.string; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class ConcatKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 2) { + throw new KsqlFunctionException("Concat udf should have two input arguments."); + } + return args[0].toString() + args[1].toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/IfNullKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/IfNullKudf.java new file mode 100644 index 000000000000..2a83af07b74e --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/IfNullKudf.java @@ -0,0 +1,28 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.string; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class IfNullKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object...
args) { + if (args.length != 2) { + throw new KsqlFunctionException("IfNull udf should have two input arguments."); + } + if (args[0] == null) { + return args[1]; + } else { + return args[0]; + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/LCaseKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/LCaseKudf.java new file mode 100644 index 000000000000..f7ab0f03588b --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/LCaseKudf.java @@ -0,0 +1,24 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.string; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class LCaseKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 1) { + throw new KsqlFunctionException("LCase udf should have one input argument."); + } + return args[0].toString().toLowerCase(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/LenKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/LenKudf.java new file mode 100644 index 000000000000..12df0046d692 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/LenKudf.java @@ -0,0 +1,24 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.string; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class LenKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 1) { + throw new KsqlFunctionException("Length udf should have one input argument."); + } + return args[0].toString().length(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/SubstringKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/SubstringKudf.java new file mode 100644 index 000000000000..a1c07e7406a8 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/SubstringKudf.java @@ -0,0 +1,31 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.string; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class SubstringKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object... args) { + if ((args.length < 2) || (args.length > 3)) { + throw new KsqlFunctionException("Substring udf should have two or three input arguments."); + } + String string = args[0].toString(); + long start = (Long) args[1]; + if (args.length == 2) { + return string.substring((int) start); + } else { + long end = (Long) args[2]; + return string.substring((int) start, (int) end); + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/TrimKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/TrimKudf.java new file mode 100644 index 000000000000..ecd9d9de26ea --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/TrimKudf.java @@ -0,0 +1,24 @@ +/** + * Copyright 2017 Confluent Inc.
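+ *
+ * TrimKudf implements the TRIM scalar UDF: it takes a single argument and
+ * returns its string form with leading and trailing whitespace removed.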
+ **/ + +package io.confluent.ksql.function.udf.string; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class TrimKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 1) { + throw new KsqlFunctionException("Trim udf should have one input argument."); + } + return args[0].toString().trim(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/UCaseKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/UCaseKudf.java new file mode 100644 index 000000000000..19663d2a75ac --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/string/UCaseKudf.java @@ -0,0 +1,24 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.string; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class UCaseKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 1) { + throw new KsqlFunctionException("UCase udf should have one input argument."); + } + return args[0].toString().toUpperCase(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/function/udf/util/CastKudf.java b/ksql-core/src/main/java/io/confluent/ksql/function/udf/util/CastKudf.java new file mode 100644 index 000000000000..8af266628f5f --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/function/udf/util/CastKudf.java @@ -0,0 +1,26 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.function.udf.util; + +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.udf.Kudf; + +public class CastKudf implements Kudf { + + @Override + public void init() { + + } + + @Override + public Object evaluate(Object... args) { + if (args.length != 2) { + throw new KsqlFunctionException("Cast udf should have two input arguments."); + } + // Note: currently returns the string concatenation of both arguments, mirroring ConcatKudf. + return args[0].toString() + args[1].toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/DataSource.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/DataSource.java new file mode 100644 index 000000000000..e8524aa72d37 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/DataSource.java @@ -0,0 +1,21 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.metastore; + +public interface DataSource { + + public static enum DataSourceType { KTOPIC, KSTREAM, KTABLE } + + public static enum DataSourceSerDe { JSON, AVRO, DELIMITED } + + public static final String AVRO_SERDE_NAME = "AVRO"; + public static final String JSON_SERDE_NAME = "JSON"; + public static final String DELIMITED_SERDE_NAME = "DELIMITED"; + + public String getName(); + + public DataSourceType getDataSourceType(); + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlStdOut.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlStdOut.java new file mode 100644 index 000000000000..290a3d853069 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlStdOut.java @@ -0,0 +1,48 @@ +/** + * Copyright 2017 Confluent Inc.
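+ *
+ * KsqlStdOut is a placeholder StructuredDataSource used as the sink when query
+ * results are printed to the console instead of being written to a Kafka
+ * topic; its metadata accessors therefore return null.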
+ **/ + +package io.confluent.ksql.metastore; + +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +public class KsqlStdOut extends StructuredDataSource { + + public static final String KSQL_STDOUT_NAME = "KSQL_STDOUT_NAME"; + + public KsqlStdOut(final String datasourceName, final Schema schema, final Field keyField, + final Field timestampField, final DataSourceType dataSourceType) { + super(datasourceName, schema, keyField, timestampField, dataSourceType, null); + } + + @Override + public String getName() { + return null; + } + + @Override + public Schema getSchema() { + return null; + } + + @Override + public Field getKeyField() { + return null; + } + + @Override + public DataSourceType getDataSourceType() { + return null; + } + + @Override + public StructuredDataSource cloneWithTimeKeyColumns() { + return this; + } + + @Override + public StructuredDataSource cloneWithTimeField(String timestampfieldName) { + return this; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlStream.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlStream.java new file mode 100644 index 000000000000..a3ff65863847 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlStream.java @@ -0,0 +1,43 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.metastore; + +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.SchemaUtil; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +import java.util.Optional; + +public class KsqlStream extends StructuredDataSource { + + public KsqlStream(final String datasourceName, final Schema schema, final Field keyField, + final Field timestampField, + final KsqlTopic ksqlTopic) { + super(datasourceName, schema, keyField, timestampField, DataSourceType.KSTREAM, ksqlTopic); + } + + @Override + public StructuredDataSource cloneWithTimeKeyColumns() { + Schema newSchema = SchemaUtil.addImplicitRowTimeRowKeyToSchema(schema); + return new KsqlStream(dataSourceName, newSchema, keyField, timestampField, ksqlTopic); + } + + @Override + public StructuredDataSource cloneWithTimeField(String timestampfieldName) { + Optional<Field> newTimestampField = SchemaUtil.getFieldByName(schema, timestampfieldName); + if (newTimestampField.get().schema().type() != Schema.Type.INT64) { + throw new KsqlException("Timestamp column, " + timestampfieldName + ", should be LONG" + + "(INT64)."); + } + return new KsqlStream(dataSourceName, schema, keyField, newTimestampField.get(), ksqlTopic); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " name:" + getName(); + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlTable.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlTable.java new file mode 100644 index 000000000000..791ed66d0a2a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlTable.java @@ -0,0 +1,57 @@ +/** + * Copyright 2017 Confluent Inc.
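+ *
+ * KsqlTable is the metastore representation of a KSQL TABLE: a structured data
+ * source backed by a KsqlTopic, the name of the state store that materializes
+ * it, and a flag recording whether its key is windowed.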
+ **/ + +package io.confluent.ksql.metastore; + +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.SchemaUtil; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +import java.util.Optional; + +public class KsqlTable extends StructuredDataSource { + + final String stateStoreName; + final boolean isWindowed; + + public KsqlTable(final String datasourceName, final Schema schema, final Field keyField, + final Field timestampField, + final KsqlTopic ksqlTopic, final String stateStoreName, boolean isWindowed) { + super(datasourceName, schema, keyField, timestampField, DataSourceType.KTABLE, ksqlTopic); + this.stateStoreName = stateStoreName; + this.isWindowed = isWindowed; + } + + public String getStateStoreName() { + return stateStoreName; + } + + public boolean isWindowed() { + return isWindowed; + } + + @Override + public StructuredDataSource cloneWithTimeKeyColumns() { + Schema newSchema = SchemaUtil.addImplicitRowTimeRowKeyToSchema(schema); + return new KsqlTable(dataSourceName, newSchema, keyField, timestampField, ksqlTopic, + stateStoreName, isWindowed); + } + + @Override + public StructuredDataSource cloneWithTimeField(String timestampfieldName) { + Optional<Field> newTimestampField = SchemaUtil.getFieldByName(schema, timestampfieldName); + if (newTimestampField.get().schema().type() != Schema.Type.INT64) { + throw new KsqlException("Timestamp column, " + timestampfieldName + ", should be LONG" + + "(INT64)."); + } + return new KsqlTable(dataSourceName, schema, keyField, newTimestampField.get(), ksqlTopic, + stateStoreName, isWindowed); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " name:" + getName(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlTopic.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlTopic.java new file mode 100644 index 000000000000..6059f479b35d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/KsqlTopic.java @@ -0,0 +1,57 @@ +/** + * Copyright 2017 Confluent Inc.
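+ *
+ * KsqlTopic pairs a registered KSQL topic name with the underlying Kafka topic
+ * name and the SerDe (JSON, AVRO or DELIMITED) used for its records.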
+ **/ + +package io.confluent.ksql.metastore; + +import io.confluent.ksql.serde.KsqlTopicSerDe; +import io.confluent.ksql.util.KsqlException; + +public class KsqlTopic implements DataSource { + + final String topicName; + final String kafkaTopicName; + final KsqlTopicSerDe ksqlTopicSerDe; + + public KsqlTopic(final String topicName, final String kafkaTopicName, final KsqlTopicSerDe + ksqlTopicSerDe) { + this.topicName = topicName; + this.kafkaTopicName = kafkaTopicName; + this.ksqlTopicSerDe = ksqlTopicSerDe; + } + + public KsqlTopicSerDe getKsqlTopicSerDe() { + return ksqlTopicSerDe; + } + + public String getKafkaTopicName() { + return kafkaTopicName; + } + + public String getTopicName() { + return topicName; + } + + public static DataSourceSerDe getDataSpDataSourceSerDe(String dataSourceSerdeName) { + switch (dataSourceSerdeName) { + case "JSON": + return DataSourceSerDe.JSON; + case "AVRO": + return DataSourceSerDe.AVRO; + case "DELIMITED": + return DataSourceSerDe.DELIMITED; + default: + throw new KsqlException("DataSource Type is not supported: " + dataSourceSerdeName); + } + } + + @Override + public String getName() { + return topicName; + } + + @Override + public DataSourceType getDataSourceType() { + return DataSourceType.KTOPIC; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/MetaStore.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/MetaStore.java new file mode 100644 index 000000000000..931ddd60392f --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/MetaStore.java @@ -0,0 +1,35 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.metastore; + +import java.util.Map; +import java.util.Set; + +public interface MetaStore { + + public KsqlTopic getTopic(String topicName); + + public void putTopic(KsqlTopic topic); + + public StructuredDataSource getSource(String sourceName); + + public void putSource(StructuredDataSource dataSource); + + public void deleteTopic(String topicName); + + public void deleteSource(String sourceName); + + public Map<String, StructuredDataSource> getAllStructuredDataSources(); + + public Set<String> getAllStructuredDataSourceNames(); + + public Map<String, KsqlTopic> getAllKsqlTopics(); + + public Set<String> getAllTopicNames(); + + public void putAll(MetaStore otherMetaStore); + + public MetaStore clone(); +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/MetaStoreImpl.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/MetaStoreImpl.java new file mode 100644 index 000000000000..76b39fc9a98a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/MetaStoreImpl.java @@ -0,0 +1,113 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.metastore; + +import io.confluent.ksql.util.KsqlException; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +public class MetaStoreImpl implements MetaStore { + + private final Map<String, KsqlTopic> topicMap; + private final Map<String, StructuredDataSource> dataSourceMap; + + public MetaStoreImpl() { + this.topicMap = new HashMap<>(); + this.dataSourceMap = new HashMap<>(); + } + + private MetaStoreImpl(Map<String, KsqlTopic> topicMap, + Map<String, StructuredDataSource> dataSourceMap) { + this.topicMap = (topicMap != null) ? topicMap : new HashMap<>(); + this.dataSourceMap = (dataSourceMap != null) ?
dataSourceMap : new HashMap<>(); + } + + @Override + public KsqlTopic getTopic(String topicName) { + return topicMap.get(topicName); + } + + @Override + public void putTopic(final KsqlTopic topic) { + if (topicMap.get(topic.getName()) == null) { + topicMap.put(topic.getName(), topic); + } else { + throw new KsqlException( + "Cannot add the new topic. Another topic with the same name already exists: " + + topic.getName()); + } + } + + @Override + public StructuredDataSource getSource(final String sourceName) { + return dataSourceMap.get(sourceName); + } + + @Override + public void putSource(final StructuredDataSource dataSource) { + if (getSource(dataSource.getName()) == null) { + dataSourceMap.put(dataSource.getName(), dataSource); + } else { + throw new KsqlException( + "Cannot add the new data source. Another data source with the same name already exists: " + + dataSource.toString()); + } + } + + @Override + public void deleteTopic(String topicName) { + if (!topicMap.containsKey(topicName)) { + throw new KsqlException(String.format("No topic with name %s was registered.", topicName)); + } + topicMap.remove(topicName); + } + + @Override + public void deleteSource(final String sourceName) { + if (!dataSourceMap.containsKey(sourceName)) { + throw new KsqlException(String.format("No data source with name %s exists.", sourceName)); + } + dataSourceMap.remove(sourceName); + } + + @Override + public Map<String, StructuredDataSource> getAllStructuredDataSources() { + return dataSourceMap; + } + + @Override + public Set<String> getAllStructuredDataSourceNames() { + return getAllStructuredDataSources().keySet(); + } + + @Override + public Map<String, KsqlTopic> getAllKsqlTopics() { + return topicMap; + } + + @Override + public Set<String> getAllTopicNames() { + return getAllKsqlTopics().keySet(); + } + + @Override + public void putAll(MetaStore otherMetaStore) { + this.topicMap.putAll(otherMetaStore.getAllKsqlTopics()); + this.dataSourceMap.putAll(otherMetaStore.getAllStructuredDataSources()); + } + + @Override + public MetaStore clone() { + Map<String, KsqlTopic> cloneTopicMap = new HashMap<>(); + Map<String, StructuredDataSource> cloneDataSourceMap = new HashMap<>(); + + cloneTopicMap.putAll(topicMap); + cloneDataSourceMap.putAll(dataSourceMap); + + return new MetaStoreImpl(cloneTopicMap, cloneDataSourceMap); + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/MetastoreUtil.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/MetastoreUtil.java new file mode 100644 index 000000000000..3a5e595d988e --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/MetastoreUtil.java @@ -0,0 +1,351 @@ +/** + * Copyright 2017 Confluent Inc.
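+ *
+ * MetastoreUtil reads and writes metastore content as a JSON catalog file:
+ * topics (with their SerDes) and schemas (streams and tables). It can also
+ * build an Avro schema string for a result topic.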
+ **/ + +package io.confluent.ksql.metastore; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ArrayNode; + +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.serde.KsqlTopicSerDe; +import io.confluent.ksql.serde.avro.KsqlAvroTopicSerDe; +import io.confluent.ksql.serde.delimited.KsqlDelimitedTopicSerDe; +import io.confluent.ksql.serde.json.KsqlJsonTopicSerDe; +import io.confluent.ksql.util.KsqlException; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public class MetastoreUtil { + + private StructuredDataSource createStructuredDataSource(final MetaStore metaStore, + final JsonNode node) + throws IOException { + + String name = node.get("name").asText(); + String topicname = node.get("topic").asText(); + + KsqlTopic ksqlTopic = metaStore.getTopic(topicname); + if (ksqlTopic == null) { + throw new KsqlException("Unable to add the structured data source. The corresponding topic " + + "does not exist: " + topicname); + } + + String type = node.get("type").asText().toUpperCase(); + String keyFieldName = node.get("key").asText(); + String timestampFieldName = node.get("timestamp").asText(); + ArrayNode fields = (ArrayNode) node.get("fields"); + Schema dataSource = buildDatasourceSchema(name, fields); + + if ("STREAM".equals(type)) { + return new KsqlStream(name, dataSource, dataSource.field(keyFieldName), + (dataSource.field(timestampFieldName) != null) + ? dataSource.field(timestampFieldName) : null, ksqlTopic); + } else if ("TABLE".equals(type)) { + boolean isWindowed = false; + if (node.get("iswindowed") != null) { + isWindowed = node.get("iswindowed").asBoolean(); + } + // Use the changelog topic name as state store name. + if (node.get("statestore") == null) { + return new KsqlTable(name, dataSource, dataSource.field(keyFieldName), + (dataSource.field(timestampFieldName) != null) + ? dataSource.field(timestampFieldName) : null, + ksqlTopic, ksqlTopic.getName(), isWindowed); + } + String stateStore = node.get("statestore").asText(); + return new KsqlTable(name, dataSource, dataSource.field(keyFieldName), + (dataSource.field(timestampFieldName) != null) + ?
dataSource.field(timestampFieldName) : null, + ksqlTopic, stateStore, isWindowed); + } + throw new KsqlException(String.format("Type not supported: '%s'", type)); + } + + private Schema buildDatasourceSchema(String name, ArrayNode fields) { + SchemaBuilder dataSourceBuilder = SchemaBuilder.struct().name(name); + for (int i = 0; i < fields.size(); i++) { + String fieldName = fields.get(i).get("name").textValue(); + String fieldType; + if (fields.get(i).get("type").isArray()) { + fieldType = fields.get(i).get("type").get(0).textValue(); + } else { + fieldType = fields.get(i).get("type").textValue(); + } + + dataSourceBuilder.field(fieldName, getKsqlType(fieldType)); + } + + return dataSourceBuilder.build(); + } + + private KsqlTopic createKafkaTopicDataSource(final JsonNode node) throws IOException { + + KsqlTopicSerDe topicSerDe; + String topicname = node.get("topicname").asText(); + String kafkaTopicName = node.get("kafkatopicname").asText(); + String serde = node.get("serde").asText().toUpperCase(); + if ("AVRO".equals(serde)) { + if (node.get(DdlConfig.AVRO_SCHEMA_FILE.toLowerCase()) == null) { + throw new KsqlException("For the Avro SerDe, the Avro schema file path (avroschemafile) " + + "must be set in the schema."); + } + String schemaPath = node.get(DdlConfig.AVRO_SCHEMA_FILE.toLowerCase()).asText(); + String avroSchema = getAvroSchema(schemaPath); + topicSerDe = new KsqlAvroTopicSerDe(avroSchema); + } else if ("JSON".equals(serde)) { + topicSerDe = new KsqlJsonTopicSerDe(null); + } else if ("DELIMITED".equals(serde)) { + topicSerDe = new KsqlDelimitedTopicSerDe(); + } else { + throw new KsqlException("Topic serde is not supported."); + } + + return new KsqlTopic(topicname, kafkaTopicName, topicSerDe); + } + + private Schema getKsqlType(final String sqlType) { + switch (sqlType.toUpperCase()) { + case "STRING": + return Schema.STRING_SCHEMA; + case "BOOL": + return Schema.BOOLEAN_SCHEMA; + case "INT": + return Schema.INT32_SCHEMA; + case "LONG": + return Schema.INT64_SCHEMA; + case "DOUBLE": + return Schema.FLOAT64_SCHEMA; + default: + throw new KsqlException("Unsupported type: " + sqlType); + } + } + + private String getKsqlTypeInJson(final Schema schemaType) { + if (schemaType == Schema.INT64_SCHEMA) { + return "LONG"; + } else if (schemaType == Schema.STRING_SCHEMA) { + return "STRING"; + } else if (schemaType == Schema.FLOAT64_SCHEMA) { + return "DOUBLE"; + } else if (schemaType == Schema.INT32_SCHEMA) { + return "INTEGER"; + } else if (schemaType == Schema.BOOLEAN_SCHEMA) { + return "BOOL"; + } + throw new KsqlException("Unsupported type: " + schemaType); + } + + public MetaStore loadMetaStoreFromJsonFile(final String metaStoreJsonFilePath) + throws KsqlException { + + try { + MetaStoreImpl metaStore = new MetaStoreImpl(); + byte[] jsonData = Files.readAllBytes(Paths.get(metaStoreJsonFilePath)); + + ObjectMapper objectMapper = new ObjectMapper(); + JsonNode root = objectMapper.readTree(jsonData); + + ArrayNode topicNodes = (ArrayNode) root.get("topics"); + for (JsonNode schemaNode : topicNodes) { + KsqlTopic ksqlTopic = createKafkaTopicDataSource(schemaNode); + metaStore.putTopic(ksqlTopic); + } + + ArrayNode schemaNodes = (ArrayNode) root.get("schemas"); + for (JsonNode schemaNode : schemaNodes) { + StructuredDataSource dataSource = createStructuredDataSource(metaStore, schemaNode); + metaStore.putSource(dataSource); + } + return metaStore; + } catch (FileNotFoundException fnf) { + throw new KsqlException("Could not load the schema file from " + metaStoreJsonFilePath, fnf); + } catch
(IOException ioex) { + throw new KsqlException("Could not read schema from " + metaStoreJsonFilePath, ioex); + } + } + + private void addTopics(final StringBuilder stringBuilder, + final Map<String, KsqlTopic> topicMap) { + stringBuilder.append("\"topics\" :[ \n"); + boolean isFirst = true; + for (KsqlTopic ksqlTopic : topicMap.values()) { + if (!isFirst) { + stringBuilder.append("\t\t, \n"); + } else { + isFirst = false; + } + stringBuilder.append("\t\t{\n"); + stringBuilder.append("\t\t\t \"namespace\": \"ksql-topics\", \n"); + stringBuilder.append("\t\t\t \"topicname\": \"" + ksqlTopic.getTopicName() + "\", \n"); + stringBuilder + .append("\t\t\t \"kafkatopicname\": \"" + ksqlTopic.getKafkaTopicName() + "\", \n"); + stringBuilder.append("\t\t\t \"serde\": \"" + ksqlTopic.getKsqlTopicSerDe().getSerDe() + + "\""); + stringBuilder.append("\n\t\t}\n"); + } + stringBuilder.append("\t\t]\n"); + } + + private void addSchemas(final StringBuilder stringBuilder, final Map<String, StructuredDataSource> + dataSourceMap) { + stringBuilder.append("\t\"schemas\" :[ \n"); + boolean isFirst = true; + for (StructuredDataSource structuredDataSource : dataSourceMap.values()) { + if (isFirst) { + isFirst = false; + } else { + stringBuilder.append("\t\t, \n"); + } + stringBuilder.append("\t\t{ \n"); + stringBuilder.append("\t\t\t \"namespace\": \"ksql\", \n"); + if (structuredDataSource.dataSourceType == DataSource.DataSourceType.KSTREAM) { + stringBuilder.append("\t\t\t \"type\": \"STREAM\", \n"); + } else if (structuredDataSource.dataSourceType == DataSource.DataSourceType.KTABLE) { + stringBuilder.append("\t\t\t \"type\": \"TABLE\", \n"); + } else { + throw new KsqlException("Incorrect data source type:" + + structuredDataSource.dataSourceType); + } + + stringBuilder.append("\t\t\t \"name\": \"" + structuredDataSource.getName() + "\", \n"); + stringBuilder + .append("\t\t\t \"key\": \"" + structuredDataSource.getKeyField().name() + "\", \n"); + stringBuilder + .append("\t\t\t \"timestamp\": \"null\", " + + "\n"); + stringBuilder + .append("\t\t\t \"topic\": \"" + structuredDataSource.getKsqlTopic().getName() + + "\", \n"); + if (structuredDataSource instanceof KsqlTable) { + KsqlTable ksqlTable = (KsqlTable) structuredDataSource; + stringBuilder.append("\t\t\t \"statestore\": \"" + ksqlTable.getStateStoreName() + + "\", \n"); + stringBuilder.append("\t\t\t \"iswindowed\": \"" + ksqlTable.isWindowed() + "\", \n"); + } + stringBuilder.append("\t\t\t \"fields\": [\n"); + boolean isFirstField = true; + for (Field field : structuredDataSource.getSchema().fields()) { + if (isFirstField) { + isFirstField = false; + } else { + stringBuilder.append(", \n"); + } + stringBuilder.append("\t\t\t {\"name\": \"" + field.name() + "\", \"type\": " + + "\"" + getKsqlTypeInJson(field.schema()) + "\"} "); + } + stringBuilder.append("\t\t\t ]\n"); + stringBuilder.append("\t\t}\n"); + } + stringBuilder.append("\t ]\n"); + } + + public void writeMetastoreToFile(String filePath, MetaStore metaStore) { + StringBuilder stringBuilder = new StringBuilder("{ \n \"name\": \"ksql_catalog\",\n "); + + addTopics(stringBuilder, metaStore.getAllKsqlTopics()); + stringBuilder.append("\n\t, \n"); + addSchemas(stringBuilder, metaStore.getAllStructuredDataSources()); + stringBuilder.append("}"); + + try { + RandomAccessFile raf = new RandomAccessFile(filePath, "rw"); + raf.writeBytes(stringBuilder.toString()); + raf.close(); + }
catch (IOException e) { + throw new KsqlException("Could not write the schema into the file."); + } + } + + + public static final String DEFAULT_METASTORE_SCHEMA = "{\n" + + "\t\"name\": \"ksql_catalog\",\n" + + "\t\"topics\":[],\n" + + "\t\"schemas\" :[]\n" + + "}"; + + private String getAvroSchema(final String schemaFilePath) throws IOException { + byte[] jsonData = Files.readAllBytes(Paths.get(schemaFilePath)); + ObjectMapper objectMapper = new ObjectMapper(); + JsonNode root = objectMapper.readTree(jsonData); + return root.toString(); + } + + public void writeAvroSchemaFile(final String avroSchema, final String filePath) { + + try { + RandomAccessFile randomAccessFile = new RandomAccessFile(filePath, "rw"); + randomAccessFile.writeBytes(avroSchema); + randomAccessFile.close(); + } catch (IOException e) { + throw new KsqlException("Could not write result avro schema file: " + filePath); + } + } + + public String buildAvroSchema(final Schema schema, String name) { + StringBuilder stringBuilder = new StringBuilder("{\n\t\"namespace\": \"ksql\",\n"); + stringBuilder.append("\t\"name\": \"" + name + "\",\n"); + stringBuilder.append("\t\"type\": \"record\",\n"); + stringBuilder.append("\t\"fields\": [\n"); + boolean addComma = false; + Set<String> fieldNameSet = new HashSet<>(); + for (Field field : schema.fields()) { + if (addComma) { + stringBuilder.append(",\n"); + } else { + addComma = true; + } + String fieldName = field.name().replace(".", "_"); + while (fieldNameSet.contains(fieldName)) { + fieldName = fieldName + "_"; + } + fieldNameSet.add(fieldName); + stringBuilder + .append("\t\t{\"name\": \"" + fieldName + "\", \"type\": " + + getAvroTypeName(field.schema()) + "}"); + } + stringBuilder.append("\n\t]\n"); + stringBuilder.append("}"); + return stringBuilder.toString(); + } + + private String getAvroTypeName(final Schema schema) { + switch (schema.type()) { + case STRING: + return "\"string\""; + case BOOLEAN: + return "\"boolean\""; + case INT32: + return "\"int\""; + case INT64: + return "\"long\""; + case FLOAT64: + return "\"double\""; + default: + if (schema.type() == Schema.Type.ARRAY) { + return "{\"type\": \"array\", \"items\": " + + getAvroTypeName(schema.valueSchema()) + "}"; + } else if (schema.type() == Schema.Type.MAP) { + return "{\"type\": \"map\", \"values\": " + + getAvroTypeName(schema.valueSchema()) + "}"; + } + throw new KsqlException("Unsupported AVRO type: " + schema.type().name()); + } + } +} \ No newline at end of file diff --git a/ksql-core/src/main/java/io/confluent/ksql/metastore/StructuredDataSource.java b/ksql-core/src/main/java/io/confluent/ksql/metastore/StructuredDataSource.java new file mode 100644 index 000000000000..0a963b82d731 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/metastore/StructuredDataSource.java @@ -0,0 +1,74 @@ +/** + * Copyright 2017 Confluent Inc.
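+ *
+ * StructuredDataSource is the common base for KSQL streams and tables: a named
+ * schema plus its key field, optional timestamp field, and the KsqlTopic the
+ * records come from.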
+ **/ + +package io.confluent.ksql.metastore; + +import io.confluent.ksql.util.KsqlException; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +public abstract class StructuredDataSource implements DataSource { + + final String dataSourceName; + final DataSourceType dataSourceType; + final Schema schema; + final Field keyField; + final Field timestampField; + + final KsqlTopic ksqlTopic; + + + public StructuredDataSource(final String datasourceName, final Schema schema, + final Field keyField, + final Field timestampField, + final DataSourceType dataSourceType, final KsqlTopic ksqlTopic) { + this.dataSourceName = datasourceName; + this.schema = schema; + this.keyField = keyField; + this.timestampField = timestampField; + this.dataSourceType = dataSourceType; + this.ksqlTopic = ksqlTopic; + } + + public static DataSourceType getDataSourceType(String dataSourceTypeName) { + switch (dataSourceTypeName) { + case "STREAM": + return DataSourceType.KSTREAM; + case "TABLE": + return DataSourceType.KTABLE; + default: + throw new KsqlException("DataSource Type is not supported: " + dataSourceTypeName); + } + } + + @Override + public String getName() { + return this.dataSourceName; + } + + public Schema getSchema() { + return this.schema; + } + + public Field getKeyField() { + return this.keyField; + } + + @Override + public DataSourceType getDataSourceType() { + return this.dataSourceType; + } + + public KsqlTopic getKsqlTopic() { + return ksqlTopic; + } + + public Field getTimestampField() { + return timestampField; + } + + public abstract StructuredDataSource cloneWithTimeKeyColumns(); + + public abstract StructuredDataSource cloneWithTimeField(String timestampfieldName); +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/AstBuilder.java b/ksql-core/src/main/java/io/confluent/ksql/parser/AstBuilder.java new file mode 100644 index 000000000000..e120a0ae3c25 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/AstBuilder.java @@ -0,0 +1,1430 @@ +/** + * Copyright 2017 Confluent Inc. 
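+ *
+ * AstBuilder walks the ANTLR parse tree produced from the SqlBase grammar and
+ * builds the corresponding KSQL AST nodes (statements, queries and
+ * expressions).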
+ **/ + +package io.confluent.ksql.parser; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.SqlBaseParser.TablePropertiesContext; +import io.confluent.ksql.parser.SqlBaseParser.TablePropertyContext; +import io.confluent.ksql.parser.tree.*; +import io.confluent.ksql.util.DataSourceExtractor; +import io.confluent.ksql.util.KsqlException; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +import static java.lang.String.format; +import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.toList; + +public class AstBuilder + extends SqlBaseBaseVisitor<Node> { + + private int selectItemIndex = 0; + + public static final String DEFAULT_WINDOW_NAME = "StreamWindow"; + + private DataSourceExtractor dataSourceExtractor; + public StructuredDataSource resultDataSource = null; + + public AstBuilder(DataSourceExtractor dataSourceExtractor) { + this.dataSourceExtractor = dataSourceExtractor; + } + + @Override + public Node visitStatements(SqlBaseParser.StatementsContext context) { + List<Statement> statementList = new ArrayList<>(); + for (SqlBaseParser.SingleStatementContext singleStatementContext : context.singleStatement()) { + Statement statement = (Statement) visitSingleStatement(singleStatementContext); + statementList.add(statement); + } + return new Statements(statementList); + } + + @Override + public Node visitSingleStatement(SqlBaseParser.SingleStatementContext context) { + return (Statement) visit(context.statement()); + } + + @Override + public Node visitQuerystatement(SqlBaseParser.QuerystatementContext ctx) { + return (Statement) visitChildren(ctx); + } + + @Override + public Node visitSingleExpression(SqlBaseParser.SingleExpressionContext context) { + return visit(context.expression()); + } + + // ******************* statements ********************** + + + private Map<String, Expression> processTableProperties( + TablePropertiesContext tablePropertiesContext) { + ImmutableMap.Builder<String, Expression> properties = ImmutableMap.builder(); + if (tablePropertiesContext != null) { + for (TablePropertyContext tablePropertyContext : tablePropertiesContext.tableProperty()) { + properties.put(getIdentifierText(tablePropertyContext.identifier()), + (Expression) visit(tablePropertyContext.expression())); + } + } + return properties.build(); + } + + @Override + public Node visitIsolationLevel(SqlBaseParser.IsolationLevelContext context) { + return visit(context.levelOfIsolation()); + } + + @Override + public Node visitCreateTable(SqlBaseParser.CreateTableContext context) { + return new CreateTable(getLocation(context), getQualifiedName(context.qualifiedName()), + visit(context.tableElement(), TableElement.class), + context.EXISTS() != null, + processTableProperties(context.tableProperties())); + } + + @Override + public Node visitRegisterTopic(SqlBaseParser.RegisterTopicContext context) { + return new RegisterTopic(getLocation(context),
getQualifiedName(context.qualifiedName()), + context.EXISTS() != null, + processTableProperties(context.tableProperties())); + } + + @Override + public Node visitCreateStream(SqlBaseParser.CreateStreamContext context) { + return new CreateStream(getLocation(context), getQualifiedName(context.qualifiedName()), + visit(context.tableElement(), TableElement.class), + context.EXISTS() != null, + processTableProperties(context.tableProperties())); + } + + @Override + public Node visitCreateStreamAs(SqlBaseParser.CreateStreamAsContext context) { + Optional<Expression> partitionByColumn = Optional.empty(); + if (context.identifier() != null) { + partitionByColumn = Optional.of(new QualifiedNameReference( + QualifiedName.of(getIdentifierText(context.identifier())))); + } + + return new CreateStreamAsSelect(getLocation(context), getQualifiedName(context.qualifiedName()), + (Query) visitQuery(context.query()), + context.EXISTS() != null, + processTableProperties(context.tableProperties()), + partitionByColumn); + } + + @Override + public Node visitCreateTableAs(SqlBaseParser.CreateTableAsContext context) { + return new CreateTableAsSelect(getLocation(context), getQualifiedName(context.qualifiedName()), + (Query) visitQuery(context.query()), + context.EXISTS() != null, + processTableProperties(context.tableProperties())); + } + + @Override + public Node visitDropTopic(SqlBaseParser.DropTopicContext context) { + return new DropTopic(getLocation(context), getQualifiedName(context.qualifiedName()), + context.EXISTS() != null); + } + + @Override + public Node visitDropTable(SqlBaseParser.DropTableContext context) { + return new DropTable(getLocation(context), getQualifiedName(context.qualifiedName()), + context.EXISTS() != null); + } + + @Override + public Node visitDropStream(SqlBaseParser.DropStreamContext context) { + return new DropStream(getLocation(context), getQualifiedName(context.qualifiedName()), + context.EXISTS() != null); + } + + // ********************** query expressions ******************** + + @Override + public Node visitQuery(SqlBaseParser.QueryContext context) { + Query body = (Query) visit(context.queryNoWith()); + + return new Query( + getLocation(context), + visitIfPresent(context.with(), With.class), + body.getQueryBody(), + body.getOrderBy(), + body.getLimit()); + } + + @Override + public Node visitWith(SqlBaseParser.WithContext context) { + return new With(getLocation(context), context.RECURSIVE() != null, + visit(context.namedQuery(), WithQuery.class)); + } + + @Override + public Node visitNamedQuery(SqlBaseParser.NamedQueryContext context) { + return new WithQuery(getLocation(context), context.name.getText(), + (Query) visit(context.query()), + Optional.ofNullable(getColumnAliases(context.columnAliases()))); + } + + @Override + public Node visitQueryNoWith(SqlBaseParser.QueryNoWithContext context) { + + QueryBody term = (QueryBody) visit(context.queryTerm()); + + if (term instanceof QuerySpecification) { + // When we have a simple query specification + // followed by order by limit, fold the order by and limit + // clauses into the query specification (analyzer/planner + // expects this structure to resolve references with respect + // to columns defined in the query specification) + QuerySpecification query = (QuerySpecification) term; + return new Query( + getLocation(context), + Optional.empty(), + new QuerySpecification( + getLocation(context), + query.getSelect(), + query.getInto(), + query.getFrom(), + query.getWindowExpression(), + query.getWhere(), + query.getGroupBy(),
query.getHaving(), + visit(context.sortItem(), SortItem.class), + getTextIfPresent(context.limit)), + ImmutableList.of(), + Optional.empty()); + } + + return new Query( + getLocation(context), + Optional.empty(), + term, + visit(context.sortItem(), SortItem.class), + getTextIfPresent(context.limit)); + } + + + @Override + public Node visitQuerySpecification(SqlBaseParser.QuerySpecificationContext context) { + Table into; + if (context.into != null) { + into = (Table) visit(context.into); + } else { + // TODO: Generate a unique name + String intoName = "KSQL_Stream_" + System.currentTimeMillis(); + into = new Table(QualifiedName.of(intoName), true); + } + + Relation from = (Relation) visit(context.from); + + + + Select + select = + new Select(getLocation(context.SELECT()), isDistinct(context.setQuantifier()), + visit(context.selectItem(), SelectItem.class)); + select = new Select(getLocation(context.SELECT()), select.isDistinct(), + extractSelectItems(select, from)); + this.resultDataSource = getResultDatasource(select, into); + + return new QuerySpecification( + getLocation(context), + select, + Optional.of(into), + Optional.of(from), + visitIfPresent(context.windowExpression(), WindowExpression.class), + visitIfPresent(context.where, Expression.class), + visitIfPresent(context.groupBy(), GroupBy.class), + visitIfPresent(context.having, Expression.class), + ImmutableList.of(), + Optional.empty()); + } + + private List<SelectItem> extractSelectItems(Select select, Relation from) { + List<SelectItem> selectItems = new ArrayList<>(); + for (SelectItem selectItem : select.getSelectItems()) { + if (selectItem instanceof AllColumns) { + selectItems.addAll(getSelectStartItems(selectItem, from)); + + } else if (selectItem instanceof SingleColumn) { + selectItems.add((SingleColumn) selectItem); + } else { + throw new IllegalArgumentException( + "Unsupported SelectItem type: " + selectItem.getClass().getName()); + } + } + return selectItems; + } + + private List<SelectItem> getSelectStartItems(final SelectItem selectItem, final Relation from) { + List<SelectItem> selectItems = new ArrayList<>(); + AllColumns allColumns = (AllColumns) selectItem; + + if (from instanceof Join) { + Join join = (Join) from; + AliasedRelation left = (AliasedRelation) join.getLeft(); + StructuredDataSource + leftDataSource = + dataSourceExtractor.getMetaStore().getSource(left.getRelation().toString()); + if (leftDataSource == null) { + throw new InvalidColumnReferenceException(left.getRelation().toString() + + " does not exist."); + } + AliasedRelation right = (AliasedRelation) join.getRight(); + StructuredDataSource rightDataSource = + dataSourceExtractor.getMetaStore().getSource(right.getRelation().toString()); + if (rightDataSource == null) { + throw new InvalidColumnReferenceException(right.getRelation().toString() + + " does not exist."); + } + for (Field field : leftDataSource.getSchema().fields()) { + QualifiedNameReference + qualifiedNameReference = + new QualifiedNameReference(allColumns.getLocation().get(), + QualifiedName.of(left.getAlias() + "." + field.name())); + SingleColumn + newSelectItem = + new SingleColumn(qualifiedNameReference, + left.getAlias() + "_" + field.name()); + selectItems.add(newSelectItem); + } + for (Field field : rightDataSource.getSchema().fields()) { + QualifiedNameReference + qualifiedNameReference = + new QualifiedNameReference(allColumns.getLocation().get(), + QualifiedName.of(right.getAlias() + "."
+ field.name())); + SingleColumn + newSelectItem = + new SingleColumn(qualifiedNameReference, + right.getAlias() + "_" + field.name()); + selectItems.add(newSelectItem); + } + } else { + AliasedRelation fromRel = (AliasedRelation) from; + StructuredDataSource + fromDataSource = + dataSourceExtractor.getMetaStore() + .getSource(((Table) fromRel.getRelation()).getName().getSuffix()); + if (fromDataSource == null) { + throw new InvalidColumnReferenceException( + ((Table) fromRel.getRelation()).getName().getSuffix() + " does not exist." + ); + } + for (Field field : fromDataSource.getSchema().fields()) { + QualifiedNameReference + qualifiedNameReference = + new QualifiedNameReference(allColumns.getLocation().get(), QualifiedName + .of(fromDataSource.getName() + "." + field.name())); + SingleColumn + newSelectItem = + new SingleColumn(qualifiedNameReference, field.name()); + selectItems.add(newSelectItem); + } + } + return selectItems; + } + + @Override + public Node visitWindowExpression(SqlBaseParser.WindowExpressionContext ctx) { + String windowName = DEFAULT_WINDOW_NAME; + if (ctx.IDENTIFIER() != null) { + windowName = ctx.IDENTIFIER().getText(); + } + windowName = windowName.toUpperCase(); + if (ctx.tumblingWindowExpression() != null) { + TumblingWindowExpression tumblingWindowExpression = (TumblingWindowExpression) + visitTumblingWindowExpression(ctx.tumblingWindowExpression()); + return new WindowExpression(windowName, tumblingWindowExpression); + } else if (ctx.hoppingWindowExpression() != null) { + HoppingWindowExpression hoppingWindowExpression = (HoppingWindowExpression) + visitHoppingWindowExpression(ctx.hoppingWindowExpression()); + + return new WindowExpression(windowName, hoppingWindowExpression); + } else if (ctx.sessionWindowExpression() != null) { + SessionWindowExpression sessionWindowExpression = (SessionWindowExpression) + visitSessionWindowExpression(ctx.sessionWindowExpression()); + return new WindowExpression(windowName, sessionWindowExpression); + } + throw new KsqlException("Window description is not correct."); + } + + @Override + public Node visitHoppingWindowExpression(SqlBaseParser.HoppingWindowExpressionContext ctx) { + + List<SqlBaseParser.NumberContext> numberList = ctx.number(); + List<SqlBaseParser.WindowUnitContext> windowUnits = ctx.windowUnit(); + String sizeStr = numberList.get(0).getText(); + String advanceByStr = numberList.get(1).getText(); + + String sizeUnit = windowUnits.get(0).getText(); + String advanceByUnit = windowUnits.get(1).getText(); + return new HoppingWindowExpression( + Long.parseLong(sizeStr), + WindowExpression.getWindowUnit(sizeUnit.toUpperCase()), + Long.parseLong(advanceByStr), + WindowExpression.getWindowUnit(advanceByUnit.toUpperCase()) + ); + } + + @Override + public Node visitTumblingWindowExpression(SqlBaseParser.TumblingWindowExpressionContext ctx) { + String sizeStr = ctx.number().getText(); + String sizeUnit = ctx.windowUnit().getText(); + return new TumblingWindowExpression( + Long.parseLong(sizeStr), + WindowExpression.getWindowUnit(sizeUnit.toUpperCase()) + ); + } + + @Override + public Node visitSessionWindowExpression(SqlBaseParser.SessionWindowExpressionContext ctx) { + String sizeStr = ctx.number().getText(); + String sizeUnit = ctx.windowUnit().getText(); + return new SessionWindowExpression( + Long.parseLong(sizeStr), + WindowExpression.getWindowUnit(sizeUnit.toUpperCase()) + ); + } + + @Override + public Node visitGroupBy(SqlBaseParser.GroupByContext context) { + return new GroupBy(getLocation(context), isDistinct(context.setQuantifier()), + visit(context.groupingElement(),
GroupingElement.class)); + } + + @Override + public Node visitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext context) { + return new SimpleGroupBy(getLocation(context), + visit(context.groupingExpressions().expression(), Expression.class)); + } + + @Override + public Node visitMultipleGroupingSets(SqlBaseParser.MultipleGroupingSetsContext context) { + return new GroupingSets(getLocation(context), context.groupingSet().stream() + .map(groupingSet -> groupingSet.qualifiedName().stream() + .map(AstBuilder::getQualifiedName) + .collect(toList())) + .collect(toList())); + } + + @Override + public Node visitSetOperation(SqlBaseParser.SetOperationContext context) { + QueryBody left = (QueryBody) visit(context.left); + QueryBody right = (QueryBody) visit(context.right); + + boolean + distinct = + context.setQuantifier() == null || context.setQuantifier().DISTINCT() != null; + + switch (context.operator.getType()) { + case SqlBaseLexer.UNION: + return new Union(getLocation(context.UNION()), ImmutableList.of(left, right), distinct); + case SqlBaseLexer.INTERSECT: + return new Intersect(getLocation(context.INTERSECT()), ImmutableList.of(left, right), + distinct); + case SqlBaseLexer.EXCEPT: + return new Except(getLocation(context.EXCEPT()), left, right, distinct); + default: + throw new IllegalArgumentException("Unsupported set operation: " + + context.operator.getText()); + } + } + + @Override + public Node visitSelectAll(SqlBaseParser.SelectAllContext context) { + if (context.qualifiedName() != null) { + return new AllColumns(getLocation(context), getQualifiedName(context.qualifiedName())); + } + + return new AllColumns(getLocation(context)); + } + + @Override + public Node visitSelectSingle(SqlBaseParser.SelectSingleContext context) { + Expression selectItemExpression = (Expression) visit(context.expression()); + Optional<String> alias = Optional.ofNullable( + context.identifier()).map(AstBuilder::getIdentifierText); + if (!alias.isPresent()) { + if (selectItemExpression instanceof QualifiedNameReference) { + QualifiedNameReference + qualifiedNameReference = + (QualifiedNameReference) selectItemExpression; + alias = Optional.of(qualifiedNameReference.getName().getSuffix()); + } else if (selectItemExpression instanceof DereferenceExpression) { + DereferenceExpression dereferenceExpression = (DereferenceExpression) selectItemExpression; + if ((dataSourceExtractor.getJoinLeftSchema() != null) && (dataSourceExtractor + .getCommonFieldNames() + .contains( + dereferenceExpression + .getFieldName()))) { + alias = + Optional.of(dereferenceExpression.getBase().toString() + "_" + + dereferenceExpression.getFieldName()); + } else { + alias = Optional.of(dereferenceExpression.getFieldName()); + } + } else { + alias = Optional.of("KSQL_COL_" + selectItemIndex); + } + } + selectItemIndex++; + return new SingleColumn(getLocation(context), selectItemExpression, alias); + } + + @Override + public Node visitQualifiedName(SqlBaseParser.QualifiedNameContext context) { + return visitChildren(context); + } + + @Override + public Node visitTable(SqlBaseParser.TableContext context) { + return new Table(getLocation(context), getQualifiedName(context.qualifiedName())); + } + + @Override + public Node visitExportCatalog(SqlBaseParser.ExportCatalogContext context) { + return new ExportCatalog(Optional.ofNullable(getLocation(context)), context.STRING().getText()); + } + + @Override + public Node visitRunScript(SqlBaseParser.RunScriptContext context) { + return new
RunScript(Optional.ofNullable(getLocation(context)), context.STRING().getText()); + } + + @Override + public Node visitListRegisteredTopics(SqlBaseParser.ListRegisteredTopicsContext context) { + return new ListRegisteredTopics(Optional.ofNullable(getLocation(context))); + } + + @Override + public Node visitListTopics(SqlBaseParser.ListTopicsContext context) { + return new ListTopics(Optional.ofNullable(getLocation(context))); + } + + @Override + public Node visitListStreams(SqlBaseParser.ListStreamsContext context) { + return new ListStreams(Optional.ofNullable(getLocation(context))); + } + + @Override + public Node visitListTables(SqlBaseParser.ListTablesContext context) { + return new ListTables(Optional.ofNullable(getLocation(context))); + } + + + @Override + public Node visitListQueries(SqlBaseParser.ListQueriesContext context) { + return new ListQueries(Optional.ofNullable(getLocation(context))); + } + + @Override + public Node visitTerminateQuery(SqlBaseParser.TerminateQueryContext context) { + return new TerminateQuery(getLocation(context), + Long.parseLong(context.INTEGER_VALUE().getText())); + } + + @Override + public Node visitShowColumns(SqlBaseParser.ShowColumnsContext context) { + return new ShowColumns(getLocation(context), getQualifiedName(context.qualifiedName()), + context.TOPIC() != null); + } + + @Override + public Node visitListProperties(SqlBaseParser.ListPropertiesContext context) { + return new ListProperties(Optional.ofNullable(getLocation(context))); + } + + @Override + public Node visitSetProperty(SqlBaseParser.SetPropertyContext context) { + String propertyName = unquote(context.STRING(0).getText(), "'"); + String propertyValue = unquote(context.STRING(1).getText(), "'"); + return new SetProperty(Optional.ofNullable(getLocation(context)), propertyName, propertyValue); + } + + @Override + public Node visitUnsetProperty(SqlBaseParser.UnsetPropertyContext context) { + String propertyName = unquote(context.STRING().getText(), "'"); + return new UnsetProperty(Optional.ofNullable(getLocation(context)), propertyName); + } + + @Override + public Node visitPrintTopic(SqlBaseParser.PrintTopicContext context) { + boolean fromBeginning = context.FROM() != null; + if (context.number() == null) { + return new PrintTopic( + getLocation(context), + getQualifiedName(context.qualifiedName()), + fromBeginning, + null + ); + } else if (context.number() instanceof SqlBaseParser.IntegerLiteralContext) { + SqlBaseParser.IntegerLiteralContext + integerLiteralContext = + (SqlBaseParser.IntegerLiteralContext) context.number(); + return new PrintTopic( + getLocation(context), + getQualifiedName(context.qualifiedName()), + fromBeginning, + (LongLiteral) visitIntegerLiteral(integerLiteralContext) + ); + } else { + throw new KsqlException("Interval value should be an integer in the 'PRINT' command!"); + } + + } + + @Override + public Node visitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx) { + return visitChildren(ctx); + } + + @Override + public Node visitSubquery(SqlBaseParser.SubqueryContext context) { + return new TableSubquery(getLocation(context), (Query) visit(context.queryNoWith())); + } + + @Override + public Node visitInlineTable(SqlBaseParser.InlineTableContext context) { + return new Values(getLocation(context), visit(context.expression(), Expression.class)); + } + + @Override + public Node visitExplainFormat(SqlBaseParser.ExplainFormatContext context) { + switch (context.value.getType()) { + case SqlBaseLexer.GRAPHVIZ: + return new ExplainFormat(getLocation(context),
ExplainFormat.Type.GRAPHVIZ); + case SqlBaseLexer.TEXT: + return new ExplainFormat(getLocation(context), ExplainFormat.Type.TEXT); + default: + throw new IllegalArgumentException("Unsupported EXPLAIN format: " + + context.value.getText()); + } + } + + @Override + public Node visitExplainType(SqlBaseParser.ExplainTypeContext context) { + switch (context.value.getType()) { + case SqlBaseLexer.LOGICAL: + return new ExplainType(getLocation(context), ExplainType.Type.LOGICAL); + case SqlBaseLexer.DISTRIBUTED: + return new ExplainType(getLocation(context), ExplainType.Type.DISTRIBUTED); + default: + throw new IllegalArgumentException("Unsupported EXPLAIN type: " + context.value.getText()); + } + } + + // ***************** boolean expressions ****************** + + @Override + public Node visitLogicalNot(SqlBaseParser.LogicalNotContext context) { + return new NotExpression(getLocation(context), (Expression) visit(context.booleanExpression())); + } + + @Override + public Node visitLogicalBinary(SqlBaseParser.LogicalBinaryContext context) { + return new LogicalBinaryExpression( + getLocation(context.operator), + getLogicalBinaryOperator(context.operator), + (Expression) visit(context.left), + (Expression) visit(context.right)); + } + + // *************** from clause ***************** + + @Override + public Node visitJoinRelation(SqlBaseParser.JoinRelationContext context) { + Relation left = (Relation) visit(context.left); + Relation right; + + if (context.CROSS() != null) { + right = (Relation) visit(context.right); + return new Join(getLocation(context), Join.Type.CROSS, left, right, + Optional.empty()); + } + + JoinCriteria criteria; + if (context.NATURAL() != null) { + right = (Relation) visit(context.right); + criteria = new NaturalJoin(); + } else { + right = (Relation) visit(context.rightRelation); + if (context.joinCriteria().ON() != null) { + criteria = new JoinOn((Expression) visit(context.joinCriteria().booleanExpression())); + } else if (context.joinCriteria().USING() != null) { + List<String> columns = context.joinCriteria() + .identifier().stream() + .map(AstBuilder::getIdentifierText) + .collect(toList()); + + criteria = new JoinUsing(columns); + } else { + throw new IllegalArgumentException("Unsupported join criteria"); + } + } + + Join.Type joinType; + if (context.joinType().LEFT() != null) { + joinType = Join.Type.LEFT; + } else if (context.joinType().RIGHT() != null) { + joinType = Join.Type.RIGHT; + } else if (context.joinType().FULL() != null) { + joinType = Join.Type.FULL; + } else { + joinType = Join.Type.INNER; + } + + return new Join(getLocation(context), joinType, left, right, Optional.of(criteria)); + } + + @Override + public Node visitAliasedRelation(SqlBaseParser.AliasedRelationContext context) { + Relation child = (Relation) visit(context.relationPrimary()); + + String alias = null; + if (context.children.size() == 1) { + Table table = (Table) visit(context.relationPrimary()); + alias = table.getName().getSuffix(); + + } else if (context.children.size() == 2) { + alias = context.children.get(1).getText(); + } + + // TODO: Figure out if the call to toUpperCase() here is really necessary + return new AliasedRelation(getLocation(context), child, alias.toUpperCase(), + getColumnAliases(context.columnAliases())); + + } + + @Override + public Node visitTableName(SqlBaseParser.TableNameContext context) { + + Table table = new Table(getLocation(context), getQualifiedName(context.qualifiedName())); + if (context.tableProperties() != null) { +
table.setProperties(processTableProperties(context.tableProperties())); + } + return table; + } + + @Override + public Node visitSubqueryRelation(SqlBaseParser.SubqueryRelationContext context) { + return new TableSubquery(getLocation(context), (Query) visit(context.query())); + } + + @Override + public Node visitParenthesizedRelation(SqlBaseParser.ParenthesizedRelationContext context) { + return visit(context.relation()); + } + + // ********************* predicates ******************* + + @Override + public Node visitPredicated(SqlBaseParser.PredicatedContext context) { + if (context.predicate() != null) { + return visit(context.predicate()); + } + + return visit(context.valueExpression); + } + + @Override + public Node visitComparison(SqlBaseParser.ComparisonContext context) { + return new ComparisonExpression( + getLocation(context.comparisonOperator()), + getComparisonOperator( + ((TerminalNode) context.comparisonOperator().getChild(0)).getSymbol()), + (Expression) visit(context.value), + (Expression) visit(context.right)); + } + + @Override + public Node visitDistinctFrom(SqlBaseParser.DistinctFromContext context) { + Expression expression = new ComparisonExpression( + getLocation(context), + ComparisonExpression.Type.IS_DISTINCT_FROM, + (Expression) visit(context.value), + (Expression) visit(context.right)); + + if (context.NOT() != null) { + expression = new NotExpression(getLocation(context), expression); + } + + return expression; + } + + @Override + public Node visitBetween(SqlBaseParser.BetweenContext context) { + Expression expression = new BetweenPredicate( + getLocation(context), + (Expression) visit(context.value), + (Expression) visit(context.lower), + (Expression) visit(context.upper)); + + if (context.NOT() != null) { + expression = new NotExpression(getLocation(context), expression); + } + + return expression; + } + + @Override + public Node visitNullPredicate(SqlBaseParser.NullPredicateContext context) { + Expression child = (Expression) visit(context.value); + + if (context.NOT() == null) { + return new IsNullPredicate(getLocation(context), child); + } + + return new IsNotNullPredicate(getLocation(context), child); + } + + @Override + public Node visitLike(SqlBaseParser.LikeContext context) { + Expression escape = null; + if (context.escape != null) { + escape = (Expression) visit(context.escape); + } + + Expression + result = + new LikePredicate(getLocation(context), (Expression) visit(context.value), + (Expression) visit(context.pattern), escape); + + if (context.NOT() != null) { + result = new NotExpression(getLocation(context), result); + } + + return result; + } + + @Override + public Node visitInList(SqlBaseParser.InListContext context) { + Expression result = new InPredicate( + getLocation(context), + (Expression) visit(context.value), + new InListExpression(getLocation(context), visit(context.expression(), Expression.class))); + + if (context.NOT() != null) { + result = new NotExpression(getLocation(context), result); + } + + return result; + } + + @Override + public Node visitInSubquery(SqlBaseParser.InSubqueryContext context) { + Expression result = new InPredicate( + getLocation(context), + (Expression) visit(context.value), + new SubqueryExpression(getLocation(context), (Query) visit(context.query()))); + + if (context.NOT() != null) { + result = new NotExpression(getLocation(context), result); + } + + return result; + } + + @Override + public Node visitExists(SqlBaseParser.ExistsContext context) { + return new ExistsPredicate(getLocation(context), (Query) 
visit(context.query())); + } + + // ************** value expressions ************** + + @Override + public Node visitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext context) { + Expression child = (Expression) visit(context.valueExpression()); + + switch (context.operator.getType()) { + case SqlBaseLexer.MINUS: + return ArithmeticUnaryExpression.negative(getLocation(context), child); + case SqlBaseLexer.PLUS: + return ArithmeticUnaryExpression.positive(getLocation(context), child); + default: + throw new UnsupportedOperationException("Unsupported sign: " + context.operator.getText()); + } + } + + @Override + public Node visitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext context) { + return new ArithmeticBinaryExpression( + getLocation(context.operator), + getArithmeticBinaryOperator(context.operator), + (Expression) visit(context.left), + (Expression) visit(context.right)); + } + + @Override + public Node visitConcatenation(SqlBaseParser.ConcatenationContext context) { + return new FunctionCall( + getLocation(context.CONCAT()), + QualifiedName.of("concat"), ImmutableList.of( + (Expression) visit(context.left), + (Expression) visit(context.right))); + } + + @Override + public Node visitTimeZoneInterval(SqlBaseParser.TimeZoneIntervalContext context) { + return visit(context.interval()); + } + + @Override + public Node visitTimeZoneString(SqlBaseParser.TimeZoneStringContext context) { + return new StringLiteral(getLocation(context), unquote(context.STRING().getText(), "'")); + } + + // ********************* primary expressions ********************** + + @Override + public Node visitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext context) { + return visit(context.expression()); + } + + @Override + public Node visitRowConstructor(SqlBaseParser.RowConstructorContext context) { + return new Row(getLocation(context), visit(context.expression(), Expression.class)); + } + + + @Override + public Node visitCast(SqlBaseParser.CastContext context) { + boolean isTryCast = context.TRY_CAST() != null; + return new Cast(getLocation(context), (Expression) visit(context.expression()), + getType(context.type()), isTryCast); + } + + @Override + public Node visitExtract(SqlBaseParser.ExtractContext context) { + String fieldString = getIdentifierText(context.identifier()); + Extract.Field field; + try { + field = Extract.Field.valueOf(fieldString); + } catch (IllegalArgumentException e) { + throw new ParsingException(format("Invalid EXTRACT field: %s", fieldString), null, + context.getStart().getLine(), + context.getStart().getCharPositionInLine()); + } + return new Extract(getLocation(context), (Expression) visit(context.valueExpression()), field); + } + + @Override + public Node visitSubstring(SqlBaseParser.SubstringContext context) { + return new FunctionCall(getLocation(context), QualifiedName.of("SUBSTR"), + visit(context.valueExpression(), Expression.class)); + } + + @Override + public Node visitPosition(SqlBaseParser.PositionContext context) { + List arguments = Lists.reverse(visit(context.valueExpression(), Expression.class)); + return new FunctionCall(getLocation(context), QualifiedName.of("STRPOS"), arguments); + } + + @Override + public Node visitNormalize(SqlBaseParser.NormalizeContext context) { + Expression str = (Expression) visit(context.valueExpression()); + String + normalForm = + Optional.ofNullable( + context.normalForm()).map(ParserRuleContext::getText).orElse("NFC"); + return new FunctionCall(getLocation(context), QualifiedName.of("NORMALIZE"), 
ImmutableList
+        .of(str, new StringLiteral(getLocation(context), normalForm)));
+  }
+
+  @Override
+  public Node visitSubscript(SqlBaseParser.SubscriptContext context) {
+    return new SubscriptExpression(getLocation(context), (Expression) visit(context.value),
+                                   (Expression) visit(context.index));
+  }
+
+  @Override
+  public Node visitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext context) {
+    return new SubqueryExpression(getLocation(context), (Query) visit(context.query()));
+  }
+
+  @Override
+  public Node visitDereference(SqlBaseParser.DereferenceContext context) {
+    String fieldName = getIdentifierText(context.identifier());
+    QualifiedName tableName = QualifiedName.of(
+        context.primaryExpression().getText().toUpperCase());
+    Expression baseExpression = new QualifiedNameReference(
+        getLocation(context.primaryExpression()), tableName);
+    return new DereferenceExpression(getLocation(context), baseExpression, fieldName);
+  }
+
+  @Override
+  public Node visitColumnReference(SqlBaseParser.ColumnReferenceContext context) {
+    String columnName = getIdentifierText(context.identifier());
+    // If this is a join, resolve the column against the left and right sides.
+    if (dataSourceExtractor.getJoinLeftSchema() != null) {
+      if (dataSourceExtractor.getCommonFieldNames().contains(columnName)) {
+        throw new KsqlException("Field " + columnName + " is ambiguous.");
+      } else if (dataSourceExtractor.getLeftFieldNames().contains(columnName)) {
+        Expression baseExpression = new QualifiedNameReference(
+            getLocation(context), QualifiedName.of(dataSourceExtractor.getLeftAlias()));
+        return new DereferenceExpression(getLocation(context), baseExpression, columnName);
+      } else if (dataSourceExtractor.getRightFieldNames().contains(columnName)) {
+        Expression baseExpression = new QualifiedNameReference(
+            getLocation(context), QualifiedName.of(dataSourceExtractor.getRightAlias()));
+        return new DereferenceExpression(getLocation(context), baseExpression, columnName);
+      } else {
+        throw new InvalidColumnReferenceException(
+            "Field " + columnName + " cannot be resolved to either side of the join.");
+      }
+    } else {
+      Expression baseExpression = new QualifiedNameReference(
+          getLocation(context), QualifiedName.of(dataSourceExtractor.getFromAlias()));
+      return new DereferenceExpression(getLocation(context), baseExpression, columnName);
+    }
+  }
+
+  @Override
+  public Node visitSimpleCase(SqlBaseParser.SimpleCaseContext context) {
+    return new SimpleCaseExpression(
+        getLocation(context),
+        (Expression) visit(context.valueExpression()),
+        visit(context.whenClause(), WhenClause.class),
+        visitIfPresent(context.elseExpression, Expression.class));
+  }
+
+  @Override
+  public Node visitSearchedCase(SqlBaseParser.SearchedCaseContext context) {
+    return new SearchedCaseExpression(
+        getLocation(context),
+        visit(context.whenClause(), WhenClause.class),
+        visitIfPresent(context.elseExpression, Expression.class));
+  }
+
+  @Override
+  public Node visitWhenClause(SqlBaseParser.WhenClauseContext context) {
+    return new WhenClause(getLocation(context), (Expression) visit(context.condition),
+                          (Expression) visit(context.result));
+  }
+
+  @Override
+  public Node visitFunctionCall(SqlBaseParser.FunctionCallContext context) {
+    Optional<Window> window = visitIfPresent(context.over(), Window.class);
+
+    QualifiedName name = getQualifiedName(context.qualifiedName());
+
+    boolean distinct = isDistinct(context.setQuantifier());
+
+    if (name.toString().equals("NULLIF")) {
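+      // NULLIF is rewritten into a dedicated NullIfExpression rather than a generic
+      // FunctionCall; the checks below enforce the two-argument, non-windowed,
+      // non-DISTINCT form (e.g. NULLIF(col, 0)).
+      check(context.expression().size() == 2, "Invalid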
number of arguments for 'nullif' function", + context); + check(!window.isPresent(), "OVER clause not valid for 'nullif' function", context); + check(!distinct, "DISTINCT not valid for 'nullif' function", context); + + return new NullIfExpression( + getLocation(context), + (Expression) visit(context.expression(0)), + (Expression) visit(context.expression(1))); + } + + return new FunctionCall( + getLocation(context), + getQualifiedName(context.qualifiedName()), + window, + distinct, + visit(context.expression(), Expression.class)); + } + + @Override + public Node visitLambda(SqlBaseParser.LambdaContext context) { + List arguments = context.identifier().stream() + .map(AstBuilder::getIdentifierText) + .collect(toList()); + + Expression body = (Expression) visit(context.expression()); + + return new LambdaExpression(arguments, body); + } + + + @Override + public Node visitTableElement(SqlBaseParser.TableElementContext context) { + return new TableElement(getLocation(context), getIdentifierText(context.identifier()), + getType(context.type())); + } + + + // ************** literals ************** + + @Override + public Node visitNullLiteral(SqlBaseParser.NullLiteralContext context) { + return new NullLiteral(getLocation(context)); + } + + @Override + public Node visitStringLiteral(SqlBaseParser.StringLiteralContext context) { + return new StringLiteral(getLocation(context), unquote(context.STRING().getText(), "'")); + } + + @Override + public Node visitBinaryLiteral(SqlBaseParser.BinaryLiteralContext context) { + String raw = context.BINARY_LITERAL().getText(); + return new BinaryLiteral(getLocation(context), unquote(raw.substring(1), "'")); + } + + @Override + public Node visitTypeConstructor(SqlBaseParser.TypeConstructorContext context) { + String type = getIdentifierText(context.identifier()); + String value = unquote(context.STRING().getText(), "'"); + + if (type.equals("TIME")) { + return new TimeLiteral(getLocation(context), value); + } + if (type.equals("TIMESTAMP")) { + return new TimestampLiteral(getLocation(context), value); + } + if (type.equals("DECIMAL")) { + return new DecimalLiteral(getLocation(context), value); + } + + return new GenericLiteral(getLocation(context), type, value); + } + + @Override + public Node visitIntegerLiteral(SqlBaseParser.IntegerLiteralContext context) { + return new LongLiteral(getLocation(context), context.getText()); + } + + @Override + public Node visitDecimalLiteral(SqlBaseParser.DecimalLiteralContext context) { + return new DoubleLiteral(getLocation(context), context.getText()); + } + + @Override + public Node visitBooleanValue(SqlBaseParser.BooleanValueContext context) { + return new BooleanLiteral(getLocation(context), context.getText()); + } + + @Override + public Node visitInterval(SqlBaseParser.IntervalContext context) { + return new IntervalLiteral( + getLocation(context), + unquote(context.STRING().getText(), "'"), + Optional.ofNullable(context.sign) + .map(AstBuilder::getIntervalSign) + .orElse(IntervalLiteral.Sign.POSITIVE), + getIntervalFieldType((Token) context.from.getChild(0).getPayload()), + Optional.ofNullable(context.to) + .map((x) -> x.getChild(0).getPayload()) + .map(Token.class::cast) + .map(AstBuilder::getIntervalFieldType)); + } + + + @Override + public Node visitExplain(SqlBaseParser.ExplainContext ctx) { + Statement statement = (Statement) visit(ctx.statement()); + // Only simple explain is supported for now. 
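+    // Illustrative example: a statement such as "EXPLAIN SELECT * FROM ORDERS;"
+    // yields an Explain node that simply wraps the inner Query; the format/type
+    // options handled by visitExplainFormat and visitExplainType above are not
+    // attached to the node here yet.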
+    // TODO: Expand to support other parts of EXPLAIN
+    return new Explain(statement, false, Arrays.asList());
+  }
+
+  // ***************** helpers *****************
+
+  @Override
+  protected Node defaultResult() {
+    return null;
+  }
+
+  @Override
+  protected Node aggregateResult(Node aggregate, Node nextResult) {
+    if (nextResult == null) {
+      throw new UnsupportedOperationException("not yet implemented");
+    }
+
+    if (aggregate == null) {
+      return nextResult;
+    }
+
+    throw new UnsupportedOperationException("not yet implemented");
+  }
+
+  private <T> Optional<T> visitIfPresent(ParserRuleContext context, Class<T> clazz) {
+    return Optional.ofNullable(context)
+        .map(this::visit)
+        .map(clazz::cast);
+  }
+
+  private <T> List<T> visit(List<? extends ParserRuleContext> contexts, Class<T> clazz) {
+    return contexts.stream()
+        .map(this::visit)
+        .map(clazz::cast)
+        .collect(toList());
+  }
+
+  public static String getIdentifierText(SqlBaseParser.IdentifierContext context) {
+    if (context instanceof SqlBaseParser.QuotedIdentifierAlternativeContext) {
+      return unquote(context.getText(), "\"");
+    } else if (context instanceof SqlBaseParser.BackQuotedIdentifierContext) {
+      return unquote(context.getText(), "`");
+    } else {
+      return context.getText().toUpperCase();
+    }
+  }
+
+  public static String unquote(String value, String quote) {
+    return value.substring(1, value.length() - 1)
+        .replace(quote + quote, quote);
+  }
+
+  private static QualifiedName getQualifiedName(SqlBaseParser.QualifiedNameContext context) {
+    List<String> parts = context
+        .identifier().stream()
+        .map(AstBuilder::getIdentifierText)
+        .collect(toList());
+
+    return QualifiedName.of(parts);
+  }
+
+  private static boolean isDistinct(SqlBaseParser.SetQuantifierContext setQuantifier) {
+    return setQuantifier != null && setQuantifier.DISTINCT() != null;
+  }
+
+  private static Optional<String> getTextIfPresent(Token token) {
+    return Optional.ofNullable(token)
+        .map(Token::getText);
+  }
+
+  private static List<String> getColumnAliases(
+      SqlBaseParser.ColumnAliasesContext columnAliasesContext) {
+    if (columnAliasesContext == null) {
+      return null;
+    }
+
+    return columnAliasesContext
+        .identifier().stream()
+        .map(AstBuilder::getIdentifierText)
+        .collect(toList());
+  }
+
+  private static ArithmeticBinaryExpression.Type getArithmeticBinaryOperator(Token operator) {
+    switch (operator.getType()) {
+      case SqlBaseLexer.PLUS:
+        return ArithmeticBinaryExpression.Type.ADD;
+      case SqlBaseLexer.MINUS:
+        return ArithmeticBinaryExpression.Type.SUBTRACT;
+      case SqlBaseLexer.ASTERISK:
+        return ArithmeticBinaryExpression.Type.MULTIPLY;
+      case SqlBaseLexer.SLASH:
+        return ArithmeticBinaryExpression.Type.DIVIDE;
+      case SqlBaseLexer.PERCENT:
+        return ArithmeticBinaryExpression.Type.MODULUS;
+      default:
+        throw new UnsupportedOperationException("Unsupported operator: " + operator.getText());
+    }
+  }
+
+  private static ComparisonExpression.Type getComparisonOperator(Token symbol) {
+    switch (symbol.getType()) {
+      case SqlBaseLexer.EQ:
+        return ComparisonExpression.Type.EQUAL;
+      case SqlBaseLexer.NEQ:
+        return ComparisonExpression.Type.NOT_EQUAL;
+      case SqlBaseLexer.LT:
+        return ComparisonExpression.Type.LESS_THAN;
+      case SqlBaseLexer.LTE:
+        return ComparisonExpression.Type.LESS_THAN_OR_EQUAL;
+      case SqlBaseLexer.GT:
+        return ComparisonExpression.Type.GREATER_THAN;
+      case SqlBaseLexer.GTE:
+        return ComparisonExpression.Type.GREATER_THAN_OR_EQUAL;
+      default:
+        throw new IllegalArgumentException("Unsupported operator: " + symbol.getText());
+    }
+  }
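+
+  // Illustrative example (not taken from the grammar itself): for the input
+  // "c1 <= 10", the operator token has type SqlBaseLexer.LTE, so
+  // getComparisonOperator maps it to ComparisonExpression.Type.LESS_THAN_OR_EQUAL
+  // and visitComparison builds ComparisonExpression(LESS_THAN_OR_EQUAL, c1, 10).
+
+  private static IntervalLiteral.IntervalField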
getIntervalFieldType(Token token) { + switch (token.getType()) { + case SqlBaseLexer.YEAR: + return IntervalLiteral.IntervalField.YEAR; + case SqlBaseLexer.MONTH: + return IntervalLiteral.IntervalField.MONTH; + case SqlBaseLexer.DAY: + return IntervalLiteral.IntervalField.DAY; + case SqlBaseLexer.HOUR: + return IntervalLiteral.IntervalField.HOUR; + case SqlBaseLexer.MINUTE: + return IntervalLiteral.IntervalField.MINUTE; + case SqlBaseLexer.SECOND: + return IntervalLiteral.IntervalField.SECOND; + default: + throw new IllegalArgumentException("Unsupported interval field: " + token.getText()); + } + } + + private static IntervalLiteral.Sign getIntervalSign(Token token) { + switch (token.getType()) { + case SqlBaseLexer.MINUS: + return IntervalLiteral.Sign.NEGATIVE; + case SqlBaseLexer.PLUS: + return IntervalLiteral.Sign.POSITIVE; + default: + throw new IllegalArgumentException("Unsupported sign: " + token.getText()); + } + } + + private static LogicalBinaryExpression.Type getLogicalBinaryOperator(Token token) { + switch (token.getType()) { + case SqlBaseLexer.AND: + return LogicalBinaryExpression.Type.AND; + case SqlBaseLexer.OR: + return LogicalBinaryExpression.Type.OR; + default: + throw new IllegalArgumentException("Unsupported operator: " + token.getText()); + } + } + + private static String getType(SqlBaseParser.TypeContext type) { + if (type.baseType() != null) { + String signature = baseTypeToString(type.baseType()); + if (!type.typeParameter().isEmpty()) { + String typeParameterSignature = type + .typeParameter() + .stream() + .map(AstBuilder::typeParameterToString) + .collect(Collectors.joining(",")); + signature += "(" + typeParameterSignature + ")"; + } + return signature; + } + + if (type.ARRAY() != null) { + return "ARRAY(" + getType(type.type(0)) + ")"; + } + + if (type.MAP() != null) { + return "MAP(" + getType(type.type(0)) + "," + getType(type.type(1)) + ")"; + } + + if (type.ROW() != null) { + StringBuilder builder = new StringBuilder("("); + for (int i = 0; i < type.identifier().size(); i++) { + if (i != 0) { + builder.append(","); + } + builder.append(getIdentifierText(type.identifier(i))) + .append(" ") + .append(getType(type.type(i))); + } + builder.append(")"); + return "ROW" + builder.toString(); + } + + throw new IllegalArgumentException("Unsupported type specification: " + type.getText()); + } + + private static String typeParameterToString(SqlBaseParser.TypeParameterContext typeParameter) { + if (typeParameter.INTEGER_VALUE() != null) { + return typeParameter.INTEGER_VALUE().toString(); + } + if (typeParameter.type() != null) { + return getType(typeParameter.type()); + } + throw new IllegalArgumentException("Unsupported typeParameter: " + typeParameter.getText()); + } + + private static String baseTypeToString(SqlBaseParser.BaseTypeContext baseType) { + if (baseType.identifier() != null) { + return getIdentifierText(baseType.identifier()); + } else if (baseType.TIME_WITH_TIME_ZONE() != null) { + return baseType.TIME_WITH_TIME_ZONE().getText().toUpperCase(); + } else if (baseType.TIMESTAMP_WITH_TIME_ZONE() != null) { + return baseType.TIMESTAMP_WITH_TIME_ZONE().getText().toUpperCase(); + } else { + throw new KsqlException( + "Base type must contain either identifier, " + + "time with time zone, or timestamp with time zone" + ); + } + } + + private static void check(boolean condition, String message, ParserRuleContext context) { + if (!condition) { + throw new ParsingException(message, null, context.getStart().getLine(), + context.getStart().getCharPositionInLine()); 
+    }
+  }
+
+  private static NodeLocation getLocation(TerminalNode terminalNode) {
+    requireNonNull(terminalNode, "terminalNode is null");
+    return getLocation(terminalNode.getSymbol());
+  }
+
+  private static NodeLocation getLocation(ParserRuleContext parserRuleContext) {
+    requireNonNull(parserRuleContext, "parserRuleContext is null");
+    return getLocation(parserRuleContext.getStart());
+  }
+
+  private static NodeLocation getLocation(Token token) {
+    requireNonNull(token, "token is null");
+    return new NodeLocation(token.getLine(), token.getCharPositionInLine());
+  }
+
+  private StructuredDataSource getResultDatasource(Select select, Table into) {
+    SchemaBuilder dataSource = SchemaBuilder.struct().name(into.toString());
+    for (SelectItem selectItem : select.getSelectItems()) {
+      if (selectItem instanceof SingleColumn) {
+        SingleColumn singleColumn = (SingleColumn) selectItem;
+        String fieldName = singleColumn.getAlias().get();
+        // Every projected field is registered with a placeholder BOOLEAN schema here;
+        // the real field types are not derived from the SELECT items at this point.
+        dataSource = dataSource.field(fieldName, Schema.BOOLEAN_SCHEMA);
+      }
+    }
+
+    KsqlTopic ksqlTopic = new KsqlTopic(into.getName().toString(), into.getName().toString(),
+                                        null);
+    return new KsqlStream(into.getName().toString(), dataSource.schema(),
+                          dataSource.fields().get(0), null, ksqlTopic);
+  }
+
+  private static class InvalidColumnReferenceException extends KsqlException {
+    public InvalidColumnReferenceException(String message) {
+      super(message);
+    }
+
+    public InvalidColumnReferenceException(String message, Throwable cause) {
+      super(message, cause);
+    }
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/CaseInsensitiveStream.java b/ksql-core/src/main/java/io/confluent/ksql/parser/CaseInsensitiveStream.java
new file mode 100644
index 000000000000..45cf1d2ca941
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/CaseInsensitiveStream.java
@@ -0,0 +1,72 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser;
+
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.IntStream;
+import org.antlr.v4.runtime.misc.Interval;
+
+public class CaseInsensitiveStream implements CharStream {
+
+  private final CharStream stream;
+
+  public CaseInsensitiveStream(CharStream stream) {
+    this.stream = stream;
+  }
+
+  @Override
+  public String getText(Interval interval) {
+    return stream.getText(interval);
+  }
+
+  @Override
+  public void consume() {
+    stream.consume();
+  }
+
+  @Override
+  public int LA(int i) {
+    int result = stream.LA(i);
+
+    switch (result) {
+      case 0:
+      case IntStream.EOF:
+        return result;
+      default:
+        // Upper-case only the look-ahead character so the lexer matches
+        // case-insensitively while getText() still returns the original input.
+        return Character.toUpperCase(result);
+    }
+  }
+
+  @Override
+  public int mark() {
+    return stream.mark();
+  }
+
+  @Override
+  public void release(int marker) {
+    stream.release(marker);
+  }
+
+  @Override
+  public int index() {
+    return stream.index();
+  }
+
+  @Override
+  public void seek(int index) {
+    stream.seek(index);
+  }
+
+  @Override
+  public int size() {
+    return stream.size();
+  }
+
+  @Override
+  public String getSourceName() {
+    return stream.getSourceName();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/CodegenExpressionFormatter.java b/ksql-core/src/main/java/io/confluent/ksql/parser/CodegenExpressionFormatter.java
new file mode 100644
index 000000000000..67dae208e6bb
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/CodegenExpressionFormatter.java
@@ -0,0 +1,470 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.parser; + +import com.google.common.base.Joiner; +import io.confluent.ksql.function.KsqlFunction; +import io.confluent.ksql.function.KsqlFunctionException; +import io.confluent.ksql.function.KsqlFunctions; +import io.confluent.ksql.parser.tree.AllColumns; +import io.confluent.ksql.parser.tree.ArithmeticBinaryExpression; +import io.confluent.ksql.parser.tree.ArithmeticUnaryExpression; +import io.confluent.ksql.parser.tree.AstVisitor; +import io.confluent.ksql.parser.tree.BetweenPredicate; +import io.confluent.ksql.parser.tree.BinaryLiteral; +import io.confluent.ksql.parser.tree.BooleanLiteral; +import io.confluent.ksql.parser.tree.Cast; +import io.confluent.ksql.parser.tree.ComparisonExpression; +import io.confluent.ksql.parser.tree.DecimalLiteral; +import io.confluent.ksql.parser.tree.DereferenceExpression; +import io.confluent.ksql.parser.tree.DoubleLiteral; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.FieldReference; +import io.confluent.ksql.parser.tree.FunctionCall; +import io.confluent.ksql.parser.tree.GenericLiteral; +import io.confluent.ksql.parser.tree.IsNotNullPredicate; +import io.confluent.ksql.parser.tree.IsNullPredicate; +import io.confluent.ksql.parser.tree.LikePredicate; +import io.confluent.ksql.parser.tree.LogicalBinaryExpression; +import io.confluent.ksql.parser.tree.LongLiteral; +import io.confluent.ksql.parser.tree.Node; +import io.confluent.ksql.parser.tree.NotExpression; +import io.confluent.ksql.parser.tree.NullLiteral; +import io.confluent.ksql.parser.tree.QualifiedName; +import io.confluent.ksql.parser.tree.QualifiedNameReference; +import io.confluent.ksql.parser.tree.StringLiteral; +import io.confluent.ksql.parser.tree.SubscriptExpression; +import io.confluent.ksql.parser.tree.SymbolReference; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.Pair; +import io.confluent.ksql.util.SchemaUtil; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +import static java.lang.String.format; + +public class CodegenExpressionFormatter { + + private CodegenExpressionFormatter() { + } + + static Schema schema; + + + public static String formatExpression(final Expression expression, final Schema schema) { + CodegenExpressionFormatter.schema = schema; + return formatExpression(expression, true); + } + + public static String formatExpression(final Expression expression, final boolean unmangleNames) { + Pair + expressionFormatterResult = + new CodegenExpressionFormatter.Formatter().process(expression, unmangleNames); + return expressionFormatterResult.getLeft(); + } + + + public static class Formatter + extends AstVisitor, Boolean> { + + @Override + protected Pair visitNode(final Node node, Boolean unmangleNames) { + throw new UnsupportedOperationException(); + } + + @Override + protected Pair visitExpression(final Expression node, + final Boolean unmangleNames) { + throw new UnsupportedOperationException( + format("not yet implemented: %s.visit%s", getClass().getName(), + node.getClass().getSimpleName())); + } + + @Override + protected Pair visitBooleanLiteral(final BooleanLiteral node, + final Boolean unmangleNames) { + return new Pair<>(String.valueOf(node.getValue()), Schema.BOOLEAN_SCHEMA); + } + + @Override + protected Pair visitStringLiteral(final StringLiteral node, + final Boolean unmangleNames) { + return new Pair<>("\"" + node.getValue() + "\"", 
Schema.STRING_SCHEMA);
+    }
+
+    @Override
+    protected Pair<String, Schema> visitBinaryLiteral(BinaryLiteral node,
+                                                      Boolean unmangleNames) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    protected Pair<String, Schema> visitDoubleLiteral(DoubleLiteral node,
+                                                      Boolean unmangleNames) {
+      return new Pair<>(Double.toString(node.getValue()), Schema.FLOAT64_SCHEMA);
+    }
+
+    @Override
+    protected Pair<String, Schema> visitDecimalLiteral(DecimalLiteral node,
+                                                       Boolean unmangleNames) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    protected Pair<String, Schema> visitGenericLiteral(GenericLiteral node,
+                                                       Boolean unmangleNames) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    protected Pair<String, Schema> visitNullLiteral(NullLiteral node, Boolean unmangleNames) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    protected Pair<String, Schema> visitQualifiedNameReference(QualifiedNameReference node,
+                                                               Boolean unmangleNames) {
+      String fieldName = formatQualifiedName(node.getName());
+      Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, fieldName);
+      if (!schemaField.isPresent()) {
+        throw new KsqlException("Field not found: " + fieldName);
+      }
+      return new Pair<>(fieldName.replace(".", "_"), schemaField.get().schema());
+    }
+
+    @Override
+    protected Pair<String, Schema> visitSymbolReference(SymbolReference node,
+                                                        Boolean context) {
+      String fieldName = formatIdentifier(node.getName());
+      Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, fieldName);
+      if (!schemaField.isPresent()) {
+        throw new KsqlException("Field not found: " + fieldName);
+      }
+      return new Pair<>(fieldName, schemaField.get().schema());
+    }
+
+    @Override
+    protected Pair<String, Schema> visitDereferenceExpression(DereferenceExpression node,
+                                                              Boolean unmangleNames) {
+      String fieldName = node.toString();
+      Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, fieldName);
+      if (!schemaField.isPresent()) {
+        throw new KsqlException("Field not found: " + fieldName);
+      }
+      return new Pair<>(fieldName.replace(".", "_"), schemaField.get().schema());
+    }
+
+    private static String formatQualifiedName(QualifiedName name) {
+      List<String> parts = new ArrayList<>();
+      for (String part : name.getParts()) {
+        parts.add(formatIdentifier(part));
+      }
+      return Joiner.on('.').join(parts);
+    }
+
+    @Override
+    public Pair<String, Schema> visitFieldReference(FieldReference node,
+                                                    Boolean unmangleNames) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    protected Pair<String, Schema> visitLongLiteral(LongLiteral node, Boolean unmangleNames) {
+      return new Pair<>("Long.parseLong(\"" + node.getValue() + "\")", Schema.INT64_SCHEMA);
+    }
+
+    @Override
+    protected Pair<String, Schema> visitFunctionCall(FunctionCall node,
+                                                     Boolean unmangleNames) {
+      StringBuilder builder = new StringBuilder("(");
+      String name = node.getName().getSuffix();
+      KsqlFunction ksqlFunction = KsqlFunctions.getFunction(name);
+      String javaReturnType = SchemaUtil.getJavaType(ksqlFunction.getReturnType()).getSimpleName();
+      builder.append("(").append(javaReturnType).append(") ").append(name).append(".evaluate(");
+      boolean addComma = false;
+      for (Expression argExpr : node.getArguments()) {
+        Pair<String, Schema> processedArg = process(argExpr, unmangleNames);
+        if (addComma) {
+          builder.append(" , ");
+        } else {
+          addComma = true;
+        }
+        builder.append(processedArg.getLeft());
+      }
+      builder.append(")");
+      builder.append(")");
+      return new Pair<>(builder.toString(), ksqlFunction.getReturnType());
+    }
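+
+    // Illustrative example (assuming a UDF named "LEN" is registered with an
+    // INT32 return type): the call LEN(NAME) is rendered as the Java source
+    // fragment "((Integer) LEN.evaluate(NAME))", which the codegen layer later
+    // compiles into the generated expression-evaluation class.
+
+    @Override
+    protected Pair<String, Schema> visitLogicalBinaryExpression(LogicalBinaryExpression node,
+                                                                Boolean unmangleNames) {
+      if (node.getType() == LogicalBinaryExpression.Type.OR) {
+        return new Pair<>(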
formatBinaryExpression(" || ", node.getLeft(), node.getRight(), unmangleNames), + Schema.BOOLEAN_SCHEMA); + } else if (node.getType() == LogicalBinaryExpression.Type.AND) { + return new Pair<>( + formatBinaryExpression(" && ", node.getLeft(), node.getRight(), unmangleNames), + Schema.BOOLEAN_SCHEMA); + } + throw new UnsupportedOperationException( + format("not yet implemented: %s.visit%s", getClass().getName(), + node.getClass().getSimpleName())); + } + + @Override + protected Pair visitNotExpression(NotExpression node, + Boolean unmangleNames) { + String exprString = process(node.getValue(), unmangleNames).getLeft(); + return new Pair<>("(!" + exprString + ")", Schema.BOOLEAN_SCHEMA); + } + + @Override + protected Pair visitComparisonExpression(ComparisonExpression node, + Boolean unmangleNames) { + Pair left = process(node.getLeft(), unmangleNames); + Pair right = process(node.getRight(), unmangleNames); + if ((left.getRight() == Schema.STRING_SCHEMA) || (right.getRight() == Schema.STRING_SCHEMA)) { + if ("=".equals(node.getType().getValue())) { + return new Pair<>("(" + left.getLeft() + ".equals(" + right.getLeft() + "))", + Schema.BOOLEAN_SCHEMA); + } else if ("<>".equals(node.getType().getValue())) { + return new Pair<>(" (!" + left.getLeft() + ".equals(" + right.getLeft() + "))", + Schema.BOOLEAN_SCHEMA); + } + } + String typeStr = node.getType().getValue(); + if ("=".equals(typeStr)) { + typeStr = "=="; + } else if ("<>".equals(typeStr)) { + typeStr = "!="; + } + return new Pair<>("(" + left.getLeft() + " " + typeStr + " " + right.getLeft() + ")", + Schema.BOOLEAN_SCHEMA); + } + + @Override + protected Pair visitCast(Cast node, Boolean context) { + Pair expr = process(node.getExpression(), context); + String returnTypeStr = node.getType(); + Schema returnType = SchemaUtil.getTypeSchema(returnTypeStr); + + switch (returnTypeStr) { + + case "VARCHAR": + case "STRING": + return new Pair<>("String.valueOf(" + expr.getLeft() + ")", returnType); + + case "BOOLEAN": { + Schema rightSchema = expr.getRight(); + return new Pair<>(getCastToBooleanString(rightSchema, expr.getLeft()), returnType); + } + + case "INTEGER": { + Schema rightSchema = expr.getRight(); + String exprStr = getCastToIntegerString(rightSchema, expr.getLeft()); + return new Pair<>(exprStr, returnType); + } + + case "BIGINT": { + Schema rightSchema = expr.getRight(); + String exprStr = getCastToLongString(rightSchema, expr.getLeft()); + return new Pair<>(exprStr, returnType); + } + + case "DOUBLE": { + Schema rightSchema = expr.getRight(); + String exprStr = getCastToDoubleString(rightSchema, expr.getLeft()); + return new Pair<>(exprStr, returnType); + } + default: + throw new KsqlFunctionException("Invalid cast operation: " + returnTypeStr); + } + } + + @Override + protected Pair visitIsNullPredicate(IsNullPredicate node, + Boolean unmangleNames) { + Pair value = process(node.getValue(), unmangleNames); + return new Pair<>("((" + value.getLeft() + ") == null )", Schema.BOOLEAN_SCHEMA); + } + + @Override + protected Pair visitIsNotNullPredicate(IsNotNullPredicate node, + Boolean unmangleNames) { + Pair value = process(node.getValue(), unmangleNames); + return new Pair<>("((" + value.getLeft() + ") != null )", Schema.BOOLEAN_SCHEMA); + } + + @Override + protected Pair visitArithmeticUnary(ArithmeticUnaryExpression node, + Boolean unmangleNames) { + Pair value = process(node.getValue(), unmangleNames); + + switch (node.getSign()) { + case MINUS: + // this is to avoid turning a sequence of "-" into a comment (i.e., "-- 
comment") + String separator = value.getLeft().startsWith("-") ? " " : ""; + return new Pair<>("-" + separator + value.getLeft(), value.getRight()); + case PLUS: + return new Pair<>("+" + value.getLeft(), value.getRight()); + default: + throw new UnsupportedOperationException("Unsupported sign: " + node.getSign()); + } + } + + @Override + protected Pair visitArithmeticBinary(ArithmeticBinaryExpression node, + Boolean unmangleNames) { + Pair left = process(node.getLeft(), unmangleNames); + Pair right = process(node.getRight(), unmangleNames); + return new Pair<>( + "(" + left.getLeft() + " " + node.getType().getValue() + " " + right.getLeft() + ")", + Schema.FLOAT64_SCHEMA); + } + + @Override + protected Pair visitLikePredicate(LikePredicate node, + Boolean unmangleNames) { + + // For now we just support simple prefix/suffix cases only. + String paternString = process(node.getPattern(), true).getLeft().substring(1); + paternString = paternString.substring(0, paternString.length() - 1); + String valueString = process(node.getValue(), true).getLeft(); + + if (paternString.startsWith("%")) { + if (paternString.endsWith("%")) { + return new Pair<>("(" + valueString + ").contains(\"" + + paternString.substring(1, paternString.length() - 1) + + "\")", + Schema + .STRING_SCHEMA); + } else { + return new Pair<>("(" + valueString + ").endsWith(\"" + paternString.substring(1) + + "\")", Schema.STRING_SCHEMA); + } + } + + if (paternString.endsWith("%")) { + return new Pair<>("(" + valueString + ")" + + ".startsWith(\"" + + paternString.substring(0, paternString.length() - 1) + "\")", + Schema + .STRING_SCHEMA); + } + + throw new UnsupportedOperationException(); + } + + @Override + protected Pair visitAllColumns(AllColumns node, Boolean unmangleNames) { + throw new UnsupportedOperationException(); + } + + @Override + protected Pair visitSubscriptExpression(SubscriptExpression node, + Boolean unmangleNames) { + String arrayBaseName = node.getBase().toString(); + Optional schemaField = SchemaUtil.getFieldByName(schema, arrayBaseName); + if (!schemaField.isPresent()) { + throw new KsqlException("Field not found: " + arrayBaseName); + } + if (schemaField.get().schema().type() == Schema.Type.ARRAY) { + return new Pair<>(process(node.getBase(), unmangleNames).getLeft() + "[(int)(" + + process(node.getIndex(), unmangleNames).getLeft() + ")]", schema); + } else if (schemaField.get().schema().type() == Schema.Type.MAP) { + return new Pair<>("(" + + SchemaUtil.getJavaCastString(schemaField.get().schema().valueSchema()) + + process(node.getBase(), unmangleNames).getLeft() + ".get" + + "(" + process(node.getIndex(), unmangleNames).getLeft() + "))", schema); + } + throw new UnsupportedOperationException(); + } + + @Override + protected Pair visitBetweenPredicate(BetweenPredicate node, + Boolean unmangleNames) { + throw new UnsupportedOperationException(); + } + + private String formatBinaryExpression(String operator, Expression left, Expression right, + boolean unmangleNames) { + return "(" + process(left, unmangleNames).getLeft() + " " + operator + " " + + process(right, unmangleNames).getLeft() + ")"; + } + + private static String formatIdentifier(String s) { + // TODO: handle escaping properly + return s; + } + + private String joinExpressions(List expressions, boolean unmangleNames) { + return Joiner.on(", ").join(expressions.stream() + .map((e) -> process(e, unmangleNames)).iterator()); + } + + private String getCastToBooleanString(Schema schema, String exprStr) { + if (schema == Schema.BOOLEAN_SCHEMA) { + 
return exprStr; + } else if (schema == Schema.STRING_SCHEMA) { + return "Boolean.parseBoolean(" + exprStr + ")"; + } else { + throw new KsqlFunctionException( + "Invalid cast operation: Cannot cast " + exprStr + " to boolean."); + } + } + + private String getCastToIntegerString(Schema schema, String exprStr) { + if (schema == Schema.STRING_SCHEMA) { + return "Integer.parseInt(" + exprStr + ")"; + } else if (schema == Schema.INT32_SCHEMA) { + return exprStr; + } else if (schema == Schema.INT64_SCHEMA) { + return "(new Long(" + exprStr + ").intValue())"; + } else if (schema == Schema.FLOAT64_SCHEMA) { + return "(new Double(" + exprStr + ").intValue())"; + } else { + throw new KsqlFunctionException( + "Invalid cast operation: Cannot cast " + exprStr + " to Integer."); + } + } + + private String getCastToLongString(Schema schema, String exprStr) { + if (schema == Schema.STRING_SCHEMA) { + return "Long.parseLong(" + exprStr + ")"; + } else if (schema == Schema.INT32_SCHEMA) { + return "(new Integer(" + exprStr + ").longValue())"; + } else if (schema == Schema.INT64_SCHEMA) { + return exprStr; + } else if (schema == Schema.FLOAT64_SCHEMA) { + return "(new Double(" + exprStr + ").longValue())"; + } else { + throw new KsqlFunctionException("Invalid cast operation: Cannot cast " + + exprStr + " to Long."); + } + } + + private String getCastToDoubleString(Schema schema, String exprStr) { + if (schema == Schema.STRING_SCHEMA) { + return "Double.parseDouble(" + exprStr + ")"; + } else if (schema == Schema.INT32_SCHEMA) { + return "(new Integer(" + exprStr + ").doubleValue())"; + } else if (schema == Schema.INT64_SCHEMA) { + return "(new Long(" + exprStr + ").doubleValue())"; + } else if (schema == Schema.FLOAT64_SCHEMA) { + return exprStr; + } else { + throw new KsqlFunctionException("Invalid cast operation: Cannot cast " + + exprStr + " to Double."); + } + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/ExpressionFormatter.java b/ksql-core/src/main/java/io/confluent/ksql/parser/ExpressionFormatter.java new file mode 100644 index 000000000000..3f3423844976 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/ExpressionFormatter.java @@ -0,0 +1,553 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.parser; + +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import io.confluent.ksql.parser.tree.AllColumns; +import io.confluent.ksql.parser.tree.ArithmeticBinaryExpression; +import io.confluent.ksql.parser.tree.ArithmeticUnaryExpression; +import io.confluent.ksql.parser.tree.AstVisitor; +import io.confluent.ksql.parser.tree.BetweenPredicate; +import io.confluent.ksql.parser.tree.BinaryLiteral; +import io.confluent.ksql.parser.tree.BooleanLiteral; +import io.confluent.ksql.parser.tree.Cast; +import io.confluent.ksql.parser.tree.ComparisonExpression; +import io.confluent.ksql.parser.tree.DecimalLiteral; +import io.confluent.ksql.parser.tree.DereferenceExpression; +import io.confluent.ksql.parser.tree.DoubleLiteral; +import io.confluent.ksql.parser.tree.ExistsPredicate; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.Extract; +import io.confluent.ksql.parser.tree.FieldReference; +import io.confluent.ksql.parser.tree.FrameBound; +import io.confluent.ksql.parser.tree.FunctionCall; +import io.confluent.ksql.parser.tree.GenericLiteral; +import io.confluent.ksql.parser.tree.GroupingElement; +import io.confluent.ksql.parser.tree.GroupingSets; +import io.confluent.ksql.parser.tree.InListExpression; +import io.confluent.ksql.parser.tree.InPredicate; +import io.confluent.ksql.parser.tree.IntervalLiteral; +import io.confluent.ksql.parser.tree.IsNotNullPredicate; +import io.confluent.ksql.parser.tree.IsNullPredicate; +import io.confluent.ksql.parser.tree.LambdaExpression; +import io.confluent.ksql.parser.tree.LikePredicate; +import io.confluent.ksql.parser.tree.LogicalBinaryExpression; +import io.confluent.ksql.parser.tree.LongLiteral; +import io.confluent.ksql.parser.tree.Node; +import io.confluent.ksql.parser.tree.NotExpression; +import io.confluent.ksql.parser.tree.NullIfExpression; +import io.confluent.ksql.parser.tree.NullLiteral; +import io.confluent.ksql.parser.tree.QualifiedName; +import io.confluent.ksql.parser.tree.QualifiedNameReference; +import io.confluent.ksql.parser.tree.Row; +import io.confluent.ksql.parser.tree.SearchedCaseExpression; +import io.confluent.ksql.parser.tree.SimpleCaseExpression; +import io.confluent.ksql.parser.tree.SimpleGroupBy; +import io.confluent.ksql.parser.tree.SortItem; +import io.confluent.ksql.parser.tree.StringLiteral; +import io.confluent.ksql.parser.tree.SubqueryExpression; +import io.confluent.ksql.parser.tree.SubscriptExpression; +import io.confluent.ksql.parser.tree.SymbolReference; +import io.confluent.ksql.parser.tree.TimeLiteral; +import io.confluent.ksql.parser.tree.TimestampLiteral; +import io.confluent.ksql.parser.tree.WhenClause; +import io.confluent.ksql.parser.tree.Window; +import io.confluent.ksql.parser.tree.WindowFrame; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.function.Function; + +import static com.google.common.collect.Iterables.getOnlyElement; +import static java.lang.String.format; +import static java.util.stream.Collectors.toList; + +public final class ExpressionFormatter { + + private ExpressionFormatter() { + } + + public static String formatExpression(Expression expression) { + return formatExpression(expression, true); + } + + public static String formatExpression(Expression expression, boolean unmangleNames) { + return new Formatter().process(expression, unmangleNames); + } + + public static class Formatter + extends 
AstVisitor { + + @Override + protected String visitNode(Node node, Boolean unmangleNames) { + throw new UnsupportedOperationException(); + } + + @Override + protected String visitRow(Row node, Boolean unmangleNames) { + return "ROW (" + Joiner.on(", ").join(node.getItems().stream() + .map((child) -> process(child, unmangleNames)) + .collect(toList())) + ")"; + } + + @Override + protected String visitExpression(Expression node, Boolean unmangleNames) { + throw new UnsupportedOperationException( + format("not yet implemented: %s.visit%s", getClass().getName(), + node.getClass().getSimpleName())); + } + + @Override + protected String visitExtract(Extract node, Boolean unmangleNames) { + return "EXTRACT(" + node.getField() + " FROM " + process(node.getExpression(), unmangleNames) + + ")"; + } + + @Override + protected String visitBooleanLiteral(BooleanLiteral node, Boolean unmangleNames) { + return String.valueOf(node.getValue()); + } + + @Override + protected String visitStringLiteral(StringLiteral node, Boolean unmangleNames) { + return formatStringLiteral(node.getValue()); + } + + @Override + protected String visitBinaryLiteral(BinaryLiteral node, Boolean unmangleNames) { + return "X'" + node.toHexString() + "'"; + } + + @Override + protected String visitSubscriptExpression(SubscriptExpression node, Boolean unmangleNames) { + return SqlFormatter.formatSql(node.getBase(), unmangleNames) + "[" + SqlFormatter + .formatSql(node.getIndex(), unmangleNames) + "]"; + } + + @Override + protected String visitLongLiteral(LongLiteral node, Boolean unmangleNames) { + return Long.toString(node.getValue()); + } + + @Override + protected String visitDoubleLiteral(DoubleLiteral node, Boolean unmangleNames) { + return Double.toString(node.getValue()); + } + + @Override + protected String visitDecimalLiteral(DecimalLiteral node, Boolean unmangleNames) { + return "DECIMAL '" + node.getValue() + "'"; + } + + @Override + protected String visitGenericLiteral(GenericLiteral node, Boolean unmangleNames) { + return node.getType() + " " + formatStringLiteral(node.getValue()); + } + + @Override + protected String visitTimeLiteral(TimeLiteral node, Boolean unmangleNames) { + return "TIME '" + node.getValue() + "'"; + } + + @Override + protected String visitTimestampLiteral(TimestampLiteral node, Boolean unmangleNames) { + return "TIMESTAMP '" + node.getValue() + "'"; + } + + @Override + protected String visitNullLiteral(NullLiteral node, Boolean unmangleNames) { + return "null"; + } + + @Override + protected String visitIntervalLiteral(IntervalLiteral node, Boolean unmangleNames) { + String sign = (node.getSign() == IntervalLiteral.Sign.NEGATIVE) ? 
"- " : ""; + StringBuilder builder = new StringBuilder() + .append("INTERVAL ") + .append(sign) + .append(" '").append(node.getValue()).append("' ") + .append(node.getStartField()); + + if (node.getEndField().isPresent()) { + builder.append(" TO ").append(node.getEndField().get()); + } + return builder.toString(); + } + + @Override + protected String visitSubqueryExpression(SubqueryExpression node, Boolean unmangleNames) { + return "(" + SqlFormatter.formatSql(node.getQuery(), unmangleNames) + ")"; + } + + @Override + protected String visitExists(ExistsPredicate node, Boolean unmangleNames) { + return "(EXISTS (" + SqlFormatter.formatSql(node.getSubquery(), unmangleNames) + "))"; + } + + @Override + protected String visitQualifiedNameReference(QualifiedNameReference node, + Boolean unmangleNames) { + return formatQualifiedName(node.getName()); + } + + @Override + protected String visitSymbolReference(SymbolReference node, Boolean context) { + return formatIdentifier(node.getName()); + } + + @Override + protected String visitDereferenceExpression(DereferenceExpression node, Boolean unmangleNames) { + String baseString = process(node.getBase(), unmangleNames); + return baseString + "." + formatIdentifier(node.getFieldName()); + } + + private static String formatQualifiedName(QualifiedName name) { + List parts = new ArrayList<>(); + for (String part : name.getParts()) { + parts.add(formatIdentifier(part)); + } + return Joiner.on('.').join(parts); + } + + @Override + public String visitFieldReference(FieldReference node, Boolean unmangleNames) { + // add colon so this won't parse + return ":input(" + node.getFieldIndex() + ")"; + } + + @Override + protected String visitFunctionCall(FunctionCall node, Boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + + String arguments = joinExpressions(node.getArguments(), unmangleNames); + if (node.getArguments().isEmpty() && "COUNT".equals(node.getName().getSuffix())) { + arguments = "*"; + } + if (node.isDistinct()) { + arguments = "DISTINCT " + arguments; + } + + builder.append(formatQualifiedName(node.getName())) + .append('(').append(arguments).append(')'); + + if (node.getWindow().isPresent()) { + builder.append(" OVER ").append(visitWindow(node.getWindow().get(), unmangleNames)); + } + + return builder.toString(); + } + + @Override + protected String visitLambdaExpression(LambdaExpression node, Boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + + builder.append('('); + Joiner.on(", ").appendTo(builder, node.getArguments()); + builder.append(") -> "); + builder.append(process(node.getBody(), unmangleNames)); + return builder.toString(); + } + + @Override + protected String visitLogicalBinaryExpression(LogicalBinaryExpression node, + Boolean unmangleNames) { + return formatBinaryExpression(node.getType().toString(), node.getLeft(), node.getRight(), + unmangleNames); + } + + @Override + protected String visitNotExpression(NotExpression node, Boolean unmangleNames) { + return "(NOT " + process(node.getValue(), unmangleNames) + ")"; + } + + @Override + protected String visitComparisonExpression(ComparisonExpression node, Boolean unmangleNames) { + return formatBinaryExpression(node.getType().getValue(), node.getLeft(), node.getRight(), + unmangleNames); + } + + @Override + protected String visitIsNullPredicate(IsNullPredicate node, Boolean unmangleNames) { + return "(" + process(node.getValue(), unmangleNames) + " IS NULL)"; + } + + @Override + protected String visitIsNotNullPredicate(IsNotNullPredicate node, 
Boolean unmangleNames) { + return "(" + process(node.getValue(), unmangleNames) + " IS NOT NULL)"; + } + + @Override + protected String visitNullIfExpression(NullIfExpression node, Boolean unmangleNames) { + return "NULLIF(" + process(node.getFirst(), unmangleNames) + ", " + process(node.getSecond(), + unmangleNames) + + ')'; + } + + @Override + protected String visitArithmeticUnary(ArithmeticUnaryExpression node, Boolean unmangleNames) { + String value = process(node.getValue(), unmangleNames); + + switch (node.getSign()) { + case MINUS: + // this is to avoid turning a sequence of "-" into a comment (i.e., "-- comment") + String separator = value.startsWith("-") ? " " : ""; + return "-" + separator + value; + case PLUS: + return "+" + value; + default: + throw new UnsupportedOperationException("Unsupported sign: " + node.getSign()); + } + } + + @Override + protected String visitArithmeticBinary(ArithmeticBinaryExpression node, Boolean unmangleNames) { + return formatBinaryExpression(node.getType().getValue(), node.getLeft(), node.getRight(), + unmangleNames); + } + + @Override + protected String visitLikePredicate(LikePredicate node, Boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + + builder.append('(') + .append(process(node.getValue(), unmangleNames)) + .append(" LIKE ") + .append(process(node.getPattern(), unmangleNames)); + + if (node.getEscape() != null) { + builder.append(" ESCAPE ") + .append(process(node.getEscape(), unmangleNames)); + } + + builder.append(')'); + + return builder.toString(); + } + + @Override + protected String visitAllColumns(AllColumns node, Boolean unmangleNames) { + if (node.getPrefix().isPresent()) { + return node.getPrefix().get() + ".*"; + } + + return "*"; + } + + @Override + public String visitCast(Cast node, Boolean unmangleNames) { + return (node.isSafe() ? 
"TRY_CAST" : "CAST") + + "(" + process(node.getExpression(), unmangleNames) + " AS " + node.getType() + ")"; + } + + @Override + protected String visitSearchedCaseExpression(SearchedCaseExpression node, + Boolean unmangleNames) { + ImmutableList.Builder parts = ImmutableList.builder(); + parts.add("CASE"); + for (WhenClause whenClause : node.getWhenClauses()) { + parts.add(process(whenClause, unmangleNames)); + } + + node.getDefaultValue() + .ifPresent((value) -> parts.add("ELSE").add(process(value, unmangleNames))); + + parts.add("END"); + + return "(" + Joiner.on(' ').join(parts.build()) + ")"; + } + + @Override + protected String visitSimpleCaseExpression(SimpleCaseExpression node, Boolean unmangleNames) { + ImmutableList.Builder parts = ImmutableList.builder(); + + parts.add("CASE") + .add(process(node.getOperand(), unmangleNames)); + + for (WhenClause whenClause : node.getWhenClauses()) { + parts.add(process(whenClause, unmangleNames)); + } + + node.getDefaultValue() + .ifPresent((value) -> parts.add("ELSE").add(process(value, unmangleNames))); + + parts.add("END"); + + return "(" + Joiner.on(' ').join(parts.build()) + ")"; + } + + @Override + protected String visitWhenClause(WhenClause node, Boolean unmangleNames) { + return "WHEN " + process(node.getOperand(), unmangleNames) + " THEN " + process( + node.getResult(), unmangleNames); + } + + @Override + protected String visitBetweenPredicate(BetweenPredicate node, Boolean unmangleNames) { + return "(" + process(node.getValue(), unmangleNames) + " BETWEEN " + + process(node.getMin(), unmangleNames) + " AND " + process(node.getMax(), + unmangleNames) + + ")"; + } + + @Override + protected String visitInPredicate(InPredicate node, Boolean unmangleNames) { + return "(" + process(node.getValue(), unmangleNames) + " IN " + process(node.getValueList(), + unmangleNames) + ")"; + } + + @Override + protected String visitInListExpression(InListExpression node, Boolean unmangleNames) { + return "(" + joinExpressions(node.getValues(), unmangleNames) + ")"; + } + + @Override + public String visitWindow(Window node, Boolean unmangleNames) { + + return node.toString(); + } + + @Override + public String visitWindowFrame(WindowFrame node, Boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + + builder.append(node.getType().toString()).append(' '); + + if (node.getEnd().isPresent()) { + builder.append("BETWEEN ") + .append(process(node.getStart(), unmangleNames)) + .append(" AND ") + .append(process(node.getEnd().get(), unmangleNames)); + } else { + builder.append(process(node.getStart(), unmangleNames)); + } + + return builder.toString(); + } + + @Override + public String visitFrameBound(FrameBound node, Boolean unmangleNames) { + switch (node.getType()) { + case UNBOUNDED_PRECEDING: + return "UNBOUNDED PRECEDING"; + case PRECEDING: + return process(node.getValue().get(), unmangleNames) + " PRECEDING"; + case CURRENT_ROW: + return "CURRENT ROW"; + case FOLLOWING: + return process(node.getValue().get(), unmangleNames) + " FOLLOWING"; + case UNBOUNDED_FOLLOWING: + return "UNBOUNDED FOLLOWING"; + default: + throw new IllegalArgumentException("unhandled type: " + node.getType()); + } + } + + private String formatBinaryExpression(String operator, Expression left, Expression right, + boolean unmangleNames) { + return '(' + process(left, unmangleNames) + ' ' + operator + ' ' + process(right, + unmangleNames) + + ')'; + } + + private String joinExpressions(List expressions, boolean unmangleNames) { + return Joiner.on(", 
").join(expressions.stream() + .map((e) -> process(e, unmangleNames)) + .iterator()); + } + + private static String formatIdentifier(String s) { + // TODO: handle escaping properly + return s; + } + } + + static String formatStringLiteral(String s) { + return "'" + s.replace("'", "''") + "'"; + } + + static String formatSortItems(List sortItems) { + return formatSortItems(sortItems, true); + } + + static String formatSortItems(List sortItems, boolean unmangleNames) { + return Joiner.on(", ").join(sortItems.stream() + .map(sortItemFormatterFunction(unmangleNames)) + .iterator()); + } + + static String formatGroupBy(List groupingElements) { + ImmutableList.Builder resultStrings = ImmutableList.builder(); + + for (GroupingElement groupingElement : groupingElements) { + String result = ""; + if (groupingElement instanceof SimpleGroupBy) { + Set + columns = + ImmutableSet.copyOf(((SimpleGroupBy) groupingElement).getColumnExpressions()); + if (columns.size() == 1) { + result = formatExpression(getOnlyElement(columns)); + } else { + result = formatGroupingSet(columns); + } + } else if (groupingElement instanceof GroupingSets) { + result = format("GROUPING SETS (%s)", Joiner.on(", ").join( + groupingElement.enumerateGroupingSets().stream() + .map(ExpressionFormatter::formatGroupingSet) + .iterator())); + } + resultStrings.add(result); + } + return Joiner.on(", ").join(resultStrings.build()); + } + + private static String formatGroupingSet(Set groupingSet) { + return format("(%s)", Joiner.on(", ").join(groupingSet.stream() + .map(ExpressionFormatter::formatExpression) + .iterator())); + } + + private static String formatGroupingSet(List groupingSet) { + return format("(%s)", Joiner.on(", ").join(groupingSet)); + } + + private static Function sortItemFormatterFunction(boolean unmangleNames) { + return input -> { + StringBuilder builder = new StringBuilder(); + + builder.append(formatExpression(input.getSortKey(), unmangleNames)); + + switch (input.getOrdering()) { + case ASCENDING: + builder.append(" ASC"); + break; + case DESCENDING: + builder.append(" DESC"); + break; + default: + throw new UnsupportedOperationException("unknown ordering: " + input.getOrdering()); + } + + switch (input.getNullOrdering()) { + case FIRST: + builder.append(" NULLS FIRST"); + break; + case LAST: + builder.append(" NULLS LAST"); + break; + case UNDEFINED: + // no op + break; + default: + throw new UnsupportedOperationException( + "unknown null ordering: " + input.getNullOrdering()); + } + + return builder.toString(); + }; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParser.java b/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParser.java new file mode 100644 index 000000000000..7d7043c166b2 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParser.java @@ -0,0 +1,117 @@ +/** + * Copyright 2017 Confluent Inc. 
+  static String formatStringLiteral(String s) {
+    return "'" + s.replace("'", "''") + "'";
+  }
+
+  static String formatSortItems(List<SortItem> sortItems) {
+    return formatSortItems(sortItems, true);
+  }
+
+  static String formatSortItems(List<SortItem> sortItems, boolean unmangleNames) {
+    return Joiner.on(", ").join(sortItems.stream()
+        .map(sortItemFormatterFunction(unmangleNames))
+        .iterator());
+  }
+
+  static String formatGroupBy(List<GroupingElement> groupingElements) {
+    ImmutableList.Builder<String> resultStrings = ImmutableList.builder();
+
+    for (GroupingElement groupingElement : groupingElements) {
+      String result = "";
+      if (groupingElement instanceof SimpleGroupBy) {
+        Set<Expression> columns =
+            ImmutableSet.copyOf(((SimpleGroupBy) groupingElement).getColumnExpressions());
+        if (columns.size() == 1) {
+          result = formatExpression(getOnlyElement(columns));
+        } else {
+          result = formatGroupingSet(columns);
+        }
+      } else if (groupingElement instanceof GroupingSets) {
+        result = format("GROUPING SETS (%s)", Joiner.on(", ").join(
+            groupingElement.enumerateGroupingSets().stream()
+                .map(ExpressionFormatter::formatGroupingSet)
+                .iterator()));
+      }
+      resultStrings.add(result);
+    }
+    return Joiner.on(", ").join(resultStrings.build());
+  }
+
+  private static String formatGroupingSet(Set<Expression> groupingSet) {
+    return format("(%s)", Joiner.on(", ").join(groupingSet.stream()
+        .map(ExpressionFormatter::formatExpression)
+        .iterator()));
+  }
+
+  private static String formatGroupingSet(List<?> groupingSet) {
+    return format("(%s)", Joiner.on(", ").join(groupingSet));
+  }
+
+  private static Function<SortItem, String> sortItemFormatterFunction(boolean unmangleNames) {
+    return input -> {
+      StringBuilder builder = new StringBuilder();
+
+      builder.append(formatExpression(input.getSortKey(), unmangleNames));
+
+      switch (input.getOrdering()) {
+        case ASCENDING:
+          builder.append(" ASC");
+          break;
+        case DESCENDING:
+          builder.append(" DESC");
+          break;
+        default:
+          throw new UnsupportedOperationException("unknown ordering: " + input.getOrdering());
+      }
+
+      switch (input.getNullOrdering()) {
+        case FIRST:
+          builder.append(" NULLS FIRST");
+          break;
+        case LAST:
+          builder.append(" NULLS LAST");
+          break;
+        case UNDEFINED:
+          // no op
+          break;
+        default:
+          throw new UnsupportedOperationException(
+              "unknown null ordering: " + input.getNullOrdering());
+      }
+
+      return builder.toString();
+    };
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParser.java b/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParser.java
new file mode 100644
index 000000000000..7d7043c166b2
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParser.java
@@ -0,0 +1,117 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser;
+
+import io.confluent.ksql.exception.ParseFailedException;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.tree.Node;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.util.DataSourceExtractor;
+import io.confluent.ksql.util.Pair;
+import org.antlr.v4.runtime.ANTLRInputStream;
+import org.antlr.v4.runtime.BaseErrorListener;
+import org.antlr.v4.runtime.CommonTokenStream;
+import org.antlr.v4.runtime.ParserRuleContext;
+import org.antlr.v4.runtime.RecognitionException;
+import org.antlr.v4.runtime.Recognizer;
+import org.antlr.v4.runtime.atn.PredictionMode;
+import org.antlr.v4.runtime.misc.ParseCancellationException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+
+
+public class KsqlParser {
+
+  /**
+   * Builds an AST from the given query string.
+   */
+  public List<Statement> buildAst(String sql, MetaStore metaStore) {
+    try {
+      ParserRuleContext tree = getParseTree(sql);
+      SqlBaseParser.StatementsContext statementsContext = (SqlBaseParser.StatementsContext) tree;
+      List<Statement> astNodes = new ArrayList<>();
+      for (SqlBaseParser.SingleStatementContext statementContext : statementsContext
+          .singleStatement()) {
+        DataSourceExtractor dataSourceExtractor = new DataSourceExtractor(metaStore);
+        dataSourceExtractor.extractDataSources(statementContext);
+        Node root = new AstBuilder(dataSourceExtractor).visit(statementContext);
+        Statement statement = (Statement) root;
+
+        astNodes.add(statement);
+      }
+      return astNodes;
+    } catch (Exception e) {
+      // surface any failure to the caller as a KSQL parse error
+      throw new ParseFailedException(e.getMessage(), e);
+    }
+  }
+
+  public List<SqlBaseParser.SingleStatementContext> getStatements(String sql) {
+    try {
+      ParserRuleContext tree = getParseTree(sql);
+      SqlBaseParser.StatementsContext statementsContext = (SqlBaseParser.StatementsContext) tree;
+      return statementsContext.singleStatement();
+    } catch (Exception e) {
+      throw new ParseFailedException(e.getMessage(), e);
+    }
+  }
+
+  public Pair<Statement, DataSourceExtractor> prepareStatement(
+      SqlBaseParser.SingleStatementContext statementContext, MetaStore metaStore) {
+    DataSourceExtractor dataSourceExtractor = new DataSourceExtractor(metaStore);
+    dataSourceExtractor.extractDataSources(statementContext);
+    AstBuilder astBuilder = new AstBuilder(dataSourceExtractor);
+    Node root = astBuilder.visit(statementContext);
+    Statement statement = (Statement) root;
+    return new Pair<>(statement, dataSourceExtractor);
+  }
+
+  private ParserRuleContext getParseTree(String sql) {
+
+    SqlBaseLexer sqlBaseLexer =
+        new SqlBaseLexer(new CaseInsensitiveStream(new ANTLRInputStream(sql)));
+    CommonTokenStream tokenStream = new CommonTokenStream(sqlBaseLexer);
+    SqlBaseParser sqlBaseParser = new SqlBaseParser(tokenStream);
+
+    sqlBaseLexer.removeErrorListeners();
+    sqlBaseLexer.addErrorListener(ERROR_LISTENER);
+
+    sqlBaseParser.removeErrorListeners();
+    sqlBaseParser.addErrorListener(ERROR_LISTENER);
+
+    Function<SqlBaseParser, ParserRuleContext> parseFunction = SqlBaseParser::statements;
+    ParserRuleContext tree;
+    try {
+      // first, try parsing with potentially faster SLL mode
+      sqlBaseParser.getInterpreter().setPredictionMode(PredictionMode.SLL);
+      tree = parseFunction.apply(sqlBaseParser);
+    } catch (ParseCancellationException ex) {
+      // if we fail, parse with LL mode
+      tokenStream.reset(); // rewind input stream
+      sqlBaseParser.reset();
+
+      sqlBaseParser.getInterpreter().setPredictionMode(PredictionMode.LL);
+      tree = parseFunction.apply(sqlBaseParser);
+    }
+
+    return tree;
+  }
+
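+  // Note: the SLL-then-LL sequence in getParseTree is the standard ANTLR
+  // two-stage parsing idiom: SLL prediction is faster and sufficient for most
+  // input, and the parser falls back to full (slower but complete) LL
+  // prediction only when SLL bails out with a ParseCancellationException.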
+  private static final BaseErrorListener ERROR_LISTENER = new BaseErrorListener() {
+    @Override
+    public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
+                            int charPositionInLine, String message, RecognitionException e) {
+      throw new ParsingException(message, e, line, charPositionInLine);
+    }
+  };
+
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParserErrorStrategy.java b/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParserErrorStrategy.java
new file mode 100644
index 000000000000..122711686ed0
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/KsqlParserErrorStrategy.java
@@ -0,0 +1,85 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser;
+
+
+import org.antlr.v4.runtime.DefaultErrorStrategy;
+import org.antlr.v4.runtime.FailedPredicateException;
+import org.antlr.v4.runtime.InputMismatchException;
+import org.antlr.v4.runtime.NoViableAltException;
+import org.antlr.v4.runtime.Parser;
+import org.antlr.v4.runtime.RecognitionException;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.misc.IntervalSet;
+
+public class KsqlParserErrorStrategy extends DefaultErrorStrategy {
+
+  public void reportError(Parser recognizer, RecognitionException e) {
+    if (!this.inErrorRecoveryMode(recognizer)) {
+      this.beginErrorCondition(recognizer);
+      if (e instanceof NoViableAltException) {
+        this.reportNoViableAlternative(recognizer, (NoViableAltException) e);
+      } else if (e instanceof InputMismatchException) {
+        this.reportInputMismatch(recognizer, (InputMismatchException) e);
+      } else if (e instanceof FailedPredicateException) {
+        this.reportFailedPredicate(recognizer, (FailedPredicateException) e);
+      } else {
+        System.err.println("unknown recognition error type: " + e.getClass().getName());
+        recognizer.notifyErrorListeners(e.getOffendingToken(), e.getMessage(), e);
+      }
+    }
+  }
+
+  protected void reportNoViableAlternative(Parser recognizer, NoViableAltException e) {
+    TokenStream tokens = recognizer.getInputStream();
+    String input;
+    if (tokens != null) {
+      if (e.getStartToken().getType() == -1) {
+        input = "<EOF>";
+      } else {
+        input = tokens.getText(e.getStartToken(), e.getOffendingToken());
+      }
+    } else {
+      input = "<unknown input>";
+    }
+
+    String msg = "no viable alternative at input " + this.escapeWSAndQuote(input);
+    recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e);
+  }
+
+  protected void reportInputMismatch(Parser recognizer, InputMismatchException e) {
+    String msg =
+        "Syntax error. There is a mismatch between the expected term and the term in the query. 
" + + "Please check the line and column in the query."; + recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e); + } + + protected void reportUnwantedToken(Parser recognizer) { + if (!this.inErrorRecoveryMode(recognizer)) { + this.beginErrorCondition(recognizer); + Token t = recognizer.getCurrentToken(); + String tokenName = this.getTokenErrorDisplay(t); + IntervalSet expecting = this.getExpectedTokens(recognizer); + String msg = + "extraneous input " + tokenName + " expecting " + + expecting.toString(recognizer.getVocabulary()); + recognizer.notifyErrorListeners(t, msg, (RecognitionException) null); + } + } + + protected void reportMissingToken(Parser recognizer) { + if (!this.inErrorRecoveryMode(recognizer)) { + this.beginErrorCondition(recognizer); + Token t = recognizer.getCurrentToken(); + IntervalSet expecting = this.getExpectedTokens(recognizer); + String msg = + "missing " + expecting.toString(recognizer.getVocabulary()) + " at " + this + .getTokenErrorDisplay(t); + recognizer.notifyErrorListeners(t, msg, (RecognitionException) null); + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/ParsingException.java b/ksql-core/src/main/java/io/confluent/ksql/parser/ParsingException.java new file mode 100644 index 000000000000..74d05eca9a17 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/ParsingException.java @@ -0,0 +1,50 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser; + +import io.confluent.ksql.parser.tree.NodeLocation; +import org.antlr.v4.runtime.RecognitionException; + +import static java.lang.String.format; + +public class ParsingException + extends RuntimeException { + + private final int line; + private final int charPositionInLine; + + public ParsingException(String message, RecognitionException cause, int line, + int charPositionInLine) { + super(message, cause); + + this.line = line; + this.charPositionInLine = charPositionInLine; + } + + public ParsingException(String message) { + this(message, null, 1, 0); + } + + public ParsingException(String message, NodeLocation nodeLocation) { + this(message, null, nodeLocation.getLineNumber(), nodeLocation.getColumnNumber()); + } + + public int getLineNumber() { + return line; + } + + public int getColumnNumber() { + return charPositionInLine + 1; + } + + public String getErrorMessage() { + return super.getMessage(); + } + + @Override + public String getMessage() { + return format("line %s:%s: %s", getLineNumber(), getColumnNumber(), getErrorMessage()); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/SqlFormatter.java b/ksql-core/src/main/java/io/confluent/ksql/parser/SqlFormatter.java new file mode 100644 index 000000000000..a5891f33ab8c --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/SqlFormatter.java @@ -0,0 +1,696 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.parser;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableSortedMap;
+import io.confluent.ksql.parser.tree.AliasedRelation;
+import io.confluent.ksql.parser.tree.AllColumns;
+import io.confluent.ksql.parser.tree.AstVisitor;
+import io.confluent.ksql.parser.tree.CreateTable;
+import io.confluent.ksql.parser.tree.CreateTableAsSelect;
+import io.confluent.ksql.parser.tree.CreateView;
+import io.confluent.ksql.parser.tree.Delete;
+import io.confluent.ksql.parser.tree.DropTable;
+import io.confluent.ksql.parser.tree.DropView;
+import io.confluent.ksql.parser.tree.Except;
+import io.confluent.ksql.parser.tree.Explain;
+import io.confluent.ksql.parser.tree.ExplainFormat;
+import io.confluent.ksql.parser.tree.ExplainOption;
+import io.confluent.ksql.parser.tree.ExplainType;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.Intersect;
+import io.confluent.ksql.parser.tree.Join;
+import io.confluent.ksql.parser.tree.JoinCriteria;
+import io.confluent.ksql.parser.tree.JoinOn;
+import io.confluent.ksql.parser.tree.JoinUsing;
+import io.confluent.ksql.parser.tree.NaturalJoin;
+import io.confluent.ksql.parser.tree.Node;
+import io.confluent.ksql.parser.tree.QualifiedName;
+import io.confluent.ksql.parser.tree.Query;
+import io.confluent.ksql.parser.tree.QuerySpecification;
+import io.confluent.ksql.parser.tree.Relation;
+import io.confluent.ksql.parser.tree.RenameColumn;
+import io.confluent.ksql.parser.tree.RenameTable;
+import io.confluent.ksql.parser.tree.SampledRelation;
+import io.confluent.ksql.parser.tree.Select;
+import io.confluent.ksql.parser.tree.SelectItem;
+import io.confluent.ksql.parser.tree.SetSession;
+import io.confluent.ksql.parser.tree.ShowCatalogs;
+import io.confluent.ksql.parser.tree.ShowColumns;
+import io.confluent.ksql.parser.tree.ShowCreate;
+import io.confluent.ksql.parser.tree.ShowFunctions;
+import io.confluent.ksql.parser.tree.ShowPartitions;
+import io.confluent.ksql.parser.tree.ShowSchemas;
+import io.confluent.ksql.parser.tree.ShowSession;
+import io.confluent.ksql.parser.tree.SingleColumn;
+import io.confluent.ksql.parser.tree.Table;
+import io.confluent.ksql.parser.tree.TableSubquery;
+import io.confluent.ksql.parser.tree.Union;
+import io.confluent.ksql.parser.tree.Values;
+import io.confluent.ksql.parser.tree.With;
+import io.confluent.ksql.parser.tree.WithQuery;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.collect.Iterables.getOnlyElement;
+import static com.google.common.collect.Iterables.transform;
+import static java.util.stream.Collectors.joining;
+
+public final class SqlFormatter {
+
+  private static final String INDENT = " ";
+  private static final Pattern NAME_PATTERN = Pattern.compile("[a-z_][a-z0-9_]*");
+
+  private SqlFormatter() {
+  }
+
+  public static String formatSql(Node root) {
+    StringBuilder builder = new StringBuilder();
+    new Formatter(builder, true).process(root, 0);
+    return builder.toString();
+  }
+
+  public static String formatSql(Node root, boolean unmangleNames) {
+    StringBuilder builder = new StringBuilder();
+    new Formatter(builder, unmangleNames).process(root, 0);
+    return builder.toString();
+  }
+
+  private static class Formatter
+      extends AstVisitor<Void, Integer> {
+
+    private final StringBuilder builder;
+    private final boolean unmangledNames;
+
+    public Formatter(StringBuilder builder, boolean unmangleNames) {
+      this.builder = builder;
+      this.unmangledNames = unmangleNames;
+    }
+
+    @Override
+    protected Void visitNode(Node node, Integer indent) {
+      throw new UnsupportedOperationException("not yet implemented: " + node);
+    }
+
+    @Override
+    protected Void visitExpression(Expression node, Integer indent) {
+      checkArgument(indent == 0,
+                    "visitExpression should only be called at root");
+      builder.append(ExpressionFormatter.formatExpression(node, unmangledNames));
+      return null;
+    }
+
+    @Override
+    protected Void visitQuery(Query node, Integer indent) {
+      if (node.getWith().isPresent()) {
+        With with = node.getWith().get();
+        append(indent, "WITH");
+        if (with.isRecursive()) {
+          builder.append(" RECURSIVE");
+        }
+        builder.append("\n ");
+        Iterator<WithQuery> queries = with.getQueries().iterator();
+        while (queries.hasNext()) {
+          WithQuery query = queries.next();
+          append(indent, query.getName());
+          query.getColumnNames().ifPresent(columnNames -> appendAliasColumns(builder, columnNames));
+          builder.append(" AS ");
+          process(new TableSubquery(query.getQuery()), indent);
+          builder.append('\n');
+          if (queries.hasNext()) {
+            builder.append(", ");
+          }
+        }
+      }
+
+      processRelation(node.getQueryBody(), indent);
+
+      if (!node.getOrderBy().isEmpty()) {
+        append(indent, "ORDER BY " + ExpressionFormatter.formatSortItems(node.getOrderBy()))
+            .append('\n');
+      }
+
+      if (node.getLimit().isPresent()) {
+        append(indent, "LIMIT " + node.getLimit().get())
+            .append('\n');
+      }
+
+      return null;
+    }
+
+    @Override
+    protected Void visitQuerySpecification(QuerySpecification node, Integer indent) {
+      process(node.getSelect(), indent);
+
+      if (node.getFrom().isPresent()) {
+        append(indent, "FROM");
+        builder.append('\n');
+        append(indent, " ");
+        process(node.getFrom().get(), indent);
+      }
+
+      builder.append('\n');
+
+      if (node.getWhere().isPresent()) {
+        append(indent, "WHERE " + ExpressionFormatter.formatExpression(node.getWhere().get()))
+            .append('\n');
+      }
+
+      if (node.getGroupBy().isPresent()) {
+        append(indent, "GROUP BY "
+                       + (node.getGroupBy().get().isDistinct() ? " DISTINCT " : "")
+                       + ExpressionFormatter
+                           .formatGroupBy(node.getGroupBy().get().getGroupingElements()))
+            .append('\n');
+      }
+
+      if (node.getHaving().isPresent()) {
+        append(indent, "HAVING "
+                       + ExpressionFormatter.formatExpression(node.getHaving().get()))
+            .append('\n');
+      }
+
+      if (!node.getOrderBy().isEmpty()) {
+        append(indent, "ORDER BY " + ExpressionFormatter.formatSortItems(node.getOrderBy()))
+            .append('\n');
+      }
+
+      if (node.getLimit().isPresent()) {
+        append(indent, "LIMIT " + node.getLimit().get())
+            .append('\n');
+      }
+      return null;
+    }
+
+    @Override
+    protected Void visitSelect(Select node, Integer indent) {
+      append(indent, "SELECT");
+      if (node.isDistinct()) {
+        builder.append(" DISTINCT");
+      }
+
+      if (node.getSelectItems().size() > 1) {
+        boolean first = true;
+        for (SelectItem item : node.getSelectItems()) {
+          builder.append("\n")
+              .append(indentString(indent))
+              .append(first ? 
" " : ", "); + + process(item, indent); + first = false; + } + } else { + builder.append(' '); + process(getOnlyElement(node.getSelectItems()), indent); + } + + builder.append('\n'); + + return null; + } + + @Override + protected Void visitSingleColumn(SingleColumn node, Integer indent) { + builder.append(ExpressionFormatter.formatExpression(node.getExpression())); + if (node.getAlias().isPresent()) { + builder.append(' ') + .append('"') + .append(node.getAlias().get()) + .append('"'); // TODO: handle quoting properly + } + + return null; + } + + @Override + protected Void visitAllColumns(AllColumns node, Integer context) { + builder.append(node.toString()); + + return null; + } + + @Override + protected Void visitTable(Table node, Integer indent) { + builder.append(node.getName().toString()); + return null; + } + + @Override + protected Void visitJoin(Join node, Integer indent) { + JoinCriteria criteria = node.getCriteria().orElse(null); + String type = node.getType().toString(); + if (criteria instanceof NaturalJoin) { + type = "NATURAL " + type; + } + + if (node.getType() != Join.Type.IMPLICIT) { + builder.append('('); + } + process(node.getLeft(), indent); + + builder.append('\n'); + if (node.getType() == Join.Type.IMPLICIT) { + append(indent, ", "); + } else { + append(indent, type).append(" JOIN "); + } + + process(node.getRight(), indent); + + if (node.getType() != Join.Type.CROSS && node.getType() != Join.Type.IMPLICIT) { + if (criteria instanceof JoinUsing) { + JoinUsing using = (JoinUsing) criteria; + builder.append(" USING (") + .append(Joiner.on(", ").join(using.getColumns())) + .append(")"); + } else if (criteria instanceof JoinOn) { + JoinOn on = (JoinOn) criteria; + builder.append(" ON (") + .append(ExpressionFormatter.formatExpression(on.getExpression())) + .append(")"); + } else if (!(criteria instanceof NaturalJoin)) { + throw new UnsupportedOperationException("unknown join criteria: " + criteria); + } + } + + if (node.getType() != Join.Type.IMPLICIT) { + builder.append(")"); + } + + return null; + } + + @Override + protected Void visitAliasedRelation(AliasedRelation node, Integer indent) { + process(node.getRelation(), indent); + + builder.append(' ') + .append(node.getAlias()); + + appendAliasColumns(builder, node.getColumnNames()); + + return null; + } + + @Override + protected Void visitSampledRelation(SampledRelation node, Integer indent) { + process(node.getRelation(), indent); + + builder.append(" TABLESAMPLE ") + .append(node.getType()) + .append(" (") + .append(node.getSamplePercentage()) + .append(')'); + + if (node.getColumnsToStratifyOn().isPresent()) { + builder.append(" STRATIFY ON ") + .append(" (") + .append(Joiner.on(",").join(node.getColumnsToStratifyOn().get())); + builder.append(')'); + } + + return null; + } + + @Override + protected Void visitValues(Values node, Integer indent) { + builder.append(" VALUES "); + + boolean first = true; + for (Expression row : node.getRows()) { + builder.append("\n") + .append(indentString(indent)) + .append(first ? 
" " : ", "); + + builder.append(ExpressionFormatter.formatExpression(row)); + first = false; + } + builder.append('\n'); + + return null; + } + + @Override + protected Void visitTableSubquery(TableSubquery node, Integer indent) { + builder.append('(') + .append('\n'); + + process(node.getQuery(), indent + 1); + + append(indent, ") "); + + return null; + } + + @Override + protected Void visitUnion(Union node, Integer indent) { + Iterator relations = node.getRelations().iterator(); + + while (relations.hasNext()) { + processRelation(relations.next(), indent); + + if (relations.hasNext()) { + builder.append("UNION "); + if (!node.isDistinct()) { + builder.append("ALL "); + } + } + } + + return null; + } + + @Override + protected Void visitExcept(Except node, Integer indent) { + processRelation(node.getLeft(), indent); + + builder.append("EXCEPT "); + if (!node.isDistinct()) { + builder.append("ALL "); + } + + processRelation(node.getRight(), indent); + + return null; + } + + @Override + protected Void visitIntersect(Intersect node, Integer indent) { + Iterator relations = node.getRelations().iterator(); + + while (relations.hasNext()) { + processRelation(relations.next(), indent); + + if (relations.hasNext()) { + builder.append("INTERSECT "); + if (!node.isDistinct()) { + builder.append("ALL "); + } + } + } + + return null; + } + + @Override + protected Void visitCreateView(CreateView node, Integer indent) { + builder.append("CREATE "); + if (node.isReplace()) { + builder.append("OR REPLACE "); + } + builder.append("VIEW ") + .append(node.getName()) + .append(" AS\n"); + + process(node.getQuery(), indent); + + return null; + } + + @Override + protected Void visitDropView(DropView node, Integer context) { + builder.append("DROP VIEW "); + if (node.isExists()) { + builder.append("IF EXISTS "); + } + builder.append(node.getName()); + + return null; + } + + @Override + protected Void visitExplain(Explain node, Integer indent) { + builder.append("EXPLAIN "); + if (node.isAnalyze()) { + builder.append("ANALYZE "); + } + + List options = new ArrayList<>(); + + for (ExplainOption option : node.getOptions()) { + if (option instanceof ExplainType) { + options.add("TYPE " + ((ExplainType) option).getType()); + } else if (option instanceof ExplainFormat) { + options.add("FORMAT " + ((ExplainFormat) option).getType()); + } else { + throw new UnsupportedOperationException("unhandled explain option: " + option); + } + } + + if (!options.isEmpty()) { + builder.append("("); + Joiner.on(", ").appendTo(builder, options); + builder.append(")"); + } + + builder.append("\n"); + + process(node.getStatement(), indent); + + return null; + } + + @Override + protected Void visitShowCatalogs(ShowCatalogs node, Integer context) { + builder.append("SHOW CATALOGS"); + + node.getLikePattern().ifPresent((value) -> + builder.append(" LIKE ") + .append( + ExpressionFormatter.formatStringLiteral(value))); + + return null; + } + + @Override + protected Void visitShowSchemas(ShowSchemas node, Integer context) { + builder.append("SHOW SCHEMAS"); + + if (node.getCatalog().isPresent()) { + builder.append(" FROM ") + .append(node.getCatalog().get()); + } + + node.getLikePattern().ifPresent((value) -> + builder.append(" LIKE ") + .append( + ExpressionFormatter.formatStringLiteral(value))); + + return null; + } + + @Override + protected Void visitShowCreate(ShowCreate node, Integer context) { + if (node.getType() == ShowCreate.Type.TABLE) { + builder.append("SHOW CREATE TABLE ") + .append(formatName(node.getName())); + } else if 
(node.getType() == ShowCreate.Type.VIEW) { + builder.append("SHOW CREATE VIEW ") + .append(formatName(node.getName())); + } + + return null; + } + + @Override + protected Void visitShowColumns(ShowColumns node, Integer context) { + builder.append("SHOW COLUMNS FROM ") + .append(node.getTable()); + + return null; + } + + @Override + protected Void visitShowPartitions(ShowPartitions node, Integer context) { + builder.append("SHOW PARTITIONS FROM ") + .append(node.getTable()); + + if (node.getWhere().isPresent()) { + builder.append(" WHERE ") + .append(ExpressionFormatter.formatExpression(node.getWhere().get())); + } + + if (!node.getOrderBy().isEmpty()) { + builder.append(" ORDER BY ") + .append(ExpressionFormatter.formatSortItems(node.getOrderBy())); + } + + if (node.getLimit().isPresent()) { + builder.append(" LIMIT ") + .append(node.getLimit().get()); + } + + return null; + } + + @Override + protected Void visitShowFunctions(ShowFunctions node, Integer context) { + builder.append("SHOW FUNCTIONS"); + + return null; + } + + @Override + protected Void visitShowSession(ShowSession node, Integer context) { + builder.append("SHOW SESSION"); + + return null; + } + + @Override + protected Void visitDelete(Delete node, Integer context) { + builder.append("DELETE FROM ") + .append(node.getTable().getName()); + + if (node.getWhere().isPresent()) { + builder.append(" WHERE ") + .append(ExpressionFormatter.formatExpression(node.getWhere().get())); + } + + return null; + } + + @Override + protected Void visitCreateTableAsSelect(CreateTableAsSelect node, Integer indent) { + builder.append("CREATE TABLE "); + if (node.isNotExists()) { + builder.append("IF NOT EXISTS "); + } + builder.append(node.getName()); + + if (!node.getProperties().isEmpty()) { + builder.append(" WITH ("); + Joiner.on(", ") + .appendTo(builder, transform( + node.getProperties().entrySet(), entry -> entry.getKey() + " = " + + ExpressionFormatter + .formatExpression(entry.getValue()))); + builder.append(")"); + } + + builder.append(" AS "); + process(node.getQuery(), indent); + return null; + } + + @Override + protected Void visitCreateTable(CreateTable node, Integer indent) { + builder.append("CREATE TABLE "); + if (node.isNotExists()) { + builder.append("IF NOT EXISTS "); + } + String tableName = formatName(node.getName()); + builder.append(tableName).append(" (\n"); + + String columnList = node.getElements().stream() + .map(column -> INDENT + formatName(column.getName()) + " " + column.getType()) + .collect(joining(",\n")); + builder.append(columnList); + builder.append("\n").append(")"); + + if (!node.getProperties().isEmpty()) { + builder.append("\nWITH (\n"); + // Always output the table properties in sorted order + String propertyList = ImmutableSortedMap.copyOf(node.getProperties()).entrySet().stream() + .map(entry -> INDENT + formatName(entry.getKey()) + " = " + entry.getValue()) + .collect(joining(",\n")); + builder.append(propertyList); + builder.append("\n").append(")"); + } + + return null; + } + + private static String formatName(String name) { + if (NAME_PATTERN.matcher(name).matches()) { + return name; + } + return "\"" + name + "\""; + } + + private static String formatName(QualifiedName name) { + return name.getParts().stream() + .map(Formatter::formatName) + .collect(joining(".")); + } + + @Override + protected Void visitDropTable(DropTable node, Integer context) { + builder.append("DROP TABLE "); + if (node.isExists()) { + builder.append("IF EXISTS "); + } + builder.append(node.getName()); + + return null; + } + + 
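+    // Renders, e.g. (illustrative): ALTER TABLE src RENAME TO dst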
+    @Override
+    protected Void visitRenameTable(RenameTable node, Integer context) {
+      builder.append("ALTER TABLE ")
+          .append(node.getSource())
+          .append(" RENAME TO ")
+          .append(node.getTarget());
+
+      return null;
+    }
+
+    @Override
+    protected Void visitRenameColumn(RenameColumn node, Integer context) {
+      builder.append("ALTER TABLE ")
+          .append(node.getTable())
+          .append(" RENAME COLUMN ")
+          .append(node.getSource())
+          .append(" TO ")
+          .append(node.getTarget());
+
+      return null;
+    }
+
+    @Override
+    public Void visitSetSession(SetSession node, Integer context) {
+      builder.append("SET SESSION ")
+          .append(node.getName())
+          .append(" = ")
+          .append(ExpressionFormatter.formatExpression(node.getValue()));
+
+      return null;
+    }
+
+    private void processRelation(Relation relation, Integer indent) {
+      // TODO: handle this properly
+      if (relation instanceof Table) {
+        builder.append("TABLE ")
+            .append(((Table) relation).getName())
+            .append('\n');
+      } else {
+        process(relation, indent);
+      }
+    }
+
+    private StringBuilder append(int indent, String value) {
+      return builder.append(indentString(indent))
+          .append(value);
+    }
+
+    private static String indentString(int indent) {
+      return Strings.repeat(INDENT, indent);
+    }
+  }
+
+  private static void appendAliasColumns(StringBuilder builder, List<String> columns) {
+    if ((columns != null) && (!columns.isEmpty())) {
+      builder.append(" (");
+      Joiner.on(", ").appendTo(builder, columns);
+      builder.append(')');
+    }
+  }
+}
\ No newline at end of file
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/AggregateExpressionRewriter.java b/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/AggregateExpressionRewriter.java
new file mode 100644
index 000000000000..c377de11f786
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/AggregateExpressionRewriter.java
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.rewrite;
+
+import io.confluent.ksql.function.KsqlFunctions;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.ExpressionRewriter;
+import io.confluent.ksql.parser.tree.ExpressionTreeRewriter;
+import io.confluent.ksql.parser.tree.FunctionCall;
+import io.confluent.ksql.parser.tree.QualifiedName;
+import io.confluent.ksql.parser.tree.QualifiedNameReference;
+
+import java.util.ArrayList;
+import java.util.List;
+
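+// Rewrites aggregate function calls in an expression tree into references to
+// internal variables, e.g. (illustrative): SUM(amount) + 1 is rewritten to
+// KSQL_AGG_VARIABLE_0 + 1, so the aggregation can be computed separately.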
+public class AggregateExpressionRewriter extends ExpressionRewriter<Void> {
+
+  public static final String AGGREGATE_FUNCTION_VARIABLE_PREFIX = "KSQL_AGG_VARIABLE_";
+  int aggVariableIndex = 0;
+
+  @Override
+  public Expression rewriteFunctionCall(FunctionCall node, Void context,
+                                        ExpressionTreeRewriter<Void> treeRewriter) {
+    String functionName = node.getName().getSuffix();
+    if (KsqlFunctions.isAnAggregateFunction(functionName)) {
+      String aggVarName = AGGREGATE_FUNCTION_VARIABLE_PREFIX + aggVariableIndex;
+      aggVariableIndex++;
+      return new QualifiedNameReference(QualifiedName.of(aggVarName));
+    } else {
+      List<Expression> arguments = new ArrayList<>();
+      for (Expression argExpression: node.getArguments()) {
+        arguments.add(treeRewriter.rewrite(argExpression, context));
+      }
+      return new FunctionCall(node.getName(), arguments);
+    }
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/ExpressionFormatterQueryRewrite.java b/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/ExpressionFormatterQueryRewrite.java
new file mode 100644
index 000000000000..bfb85d5d5071
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/ExpressionFormatterQueryRewrite.java
@@ -0,0 +1,553 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.rewrite;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import io.confluent.ksql.parser.SqlFormatter;
+import io.confluent.ksql.parser.tree.AllColumns;
+import io.confluent.ksql.parser.tree.ArithmeticBinaryExpression;
+import io.confluent.ksql.parser.tree.ArithmeticUnaryExpression;
+import io.confluent.ksql.parser.tree.AstVisitor;
+import io.confluent.ksql.parser.tree.BetweenPredicate;
+import io.confluent.ksql.parser.tree.BinaryLiteral;
+import io.confluent.ksql.parser.tree.BooleanLiteral;
+import io.confluent.ksql.parser.tree.Cast;
+import io.confluent.ksql.parser.tree.ComparisonExpression;
+import io.confluent.ksql.parser.tree.DecimalLiteral;
+import io.confluent.ksql.parser.tree.DereferenceExpression;
+import io.confluent.ksql.parser.tree.DoubleLiteral;
+import io.confluent.ksql.parser.tree.ExistsPredicate;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.Extract;
+import io.confluent.ksql.parser.tree.FieldReference;
+import io.confluent.ksql.parser.tree.FrameBound;
+import io.confluent.ksql.parser.tree.FunctionCall;
+import io.confluent.ksql.parser.tree.GenericLiteral;
+import io.confluent.ksql.parser.tree.GroupingElement;
+import io.confluent.ksql.parser.tree.GroupingSets;
+import io.confluent.ksql.parser.tree.InListExpression;
+import io.confluent.ksql.parser.tree.InPredicate;
+import io.confluent.ksql.parser.tree.IntervalLiteral;
+import io.confluent.ksql.parser.tree.IsNotNullPredicate;
+import io.confluent.ksql.parser.tree.IsNullPredicate;
+import io.confluent.ksql.parser.tree.LambdaExpression;
+import io.confluent.ksql.parser.tree.LikePredicate;
+import 
io.confluent.ksql.parser.tree.LogicalBinaryExpression; +import io.confluent.ksql.parser.tree.LongLiteral; +import io.confluent.ksql.parser.tree.Node; +import io.confluent.ksql.parser.tree.NotExpression; +import io.confluent.ksql.parser.tree.NullIfExpression; +import io.confluent.ksql.parser.tree.NullLiteral; +import io.confluent.ksql.parser.tree.QualifiedName; +import io.confluent.ksql.parser.tree.QualifiedNameReference; +import io.confluent.ksql.parser.tree.Row; +import io.confluent.ksql.parser.tree.SearchedCaseExpression; +import io.confluent.ksql.parser.tree.SimpleCaseExpression; +import io.confluent.ksql.parser.tree.SimpleGroupBy; +import io.confluent.ksql.parser.tree.SortItem; +import io.confluent.ksql.parser.tree.StringLiteral; +import io.confluent.ksql.parser.tree.SubqueryExpression; +import io.confluent.ksql.parser.tree.SubscriptExpression; +import io.confluent.ksql.parser.tree.SymbolReference; +import io.confluent.ksql.parser.tree.TimeLiteral; +import io.confluent.ksql.parser.tree.TimestampLiteral; +import io.confluent.ksql.parser.tree.WhenClause; +import io.confluent.ksql.parser.tree.Window; +import io.confluent.ksql.parser.tree.WindowFrame; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.function.Function; + +import static com.google.common.collect.Iterables.getOnlyElement; +import static java.lang.String.format; +import static java.util.stream.Collectors.toList; + +public final class ExpressionFormatterQueryRewrite { + + public ExpressionFormatterQueryRewrite() { + } + + public static String formatExpression(Expression expression) { + return formatExpression(expression, true); + } + + public static String formatExpression(Expression expression, boolean unmangleNames) { + return new Formatter().process(expression, unmangleNames); + } + + public static class Formatter + extends AstVisitor { + + @Override + protected String visitNode(Node node, Boolean unmangleNames) { + throw new UnsupportedOperationException(); + } + + @Override + protected String visitRow(Row node, Boolean unmangleNames) { + return "ROW (" + Joiner.on(", ").join(node.getItems().stream() + .map((child) -> process(child, unmangleNames)) + .collect(toList())) + ")"; + } + + @Override + protected String visitExpression(Expression node, Boolean unmangleNames) { + throw new UnsupportedOperationException( + format("not yet implemented: %s.visit%s", getClass().getName(), + node.getClass().getSimpleName())); + } + + @Override + protected String visitExtract(Extract node, Boolean unmangleNames) { + return "EXTRACT(" + node.getField() + " FROM " + process(node.getExpression(), unmangleNames) + + ")"; + } + + @Override + protected String visitBooleanLiteral(BooleanLiteral node, Boolean unmangleNames) { + return String.valueOf(node.getValue()); + } + + @Override + protected String visitStringLiteral(StringLiteral node, Boolean unmangleNames) { + return formatStringLiteral(node.getValue()); + } + + @Override + protected String visitBinaryLiteral(BinaryLiteral node, Boolean unmangleNames) { + return "X'" + node.toHexString() + "'"; + } + + @Override + protected String visitSubscriptExpression(SubscriptExpression node, Boolean unmangleNames) { + return SqlFormatter.formatSql(node.getBase(), unmangleNames) + "[" + SqlFormatter + .formatSql(node.getIndex(), unmangleNames) + "]"; + } + + @Override + protected String visitLongLiteral(LongLiteral node, Boolean unmangleNames) { + return Long.toString(node.getValue()); + } + + @Override + protected String 
visitDoubleLiteral(DoubleLiteral node, Boolean unmangleNames) { + return Double.toString(node.getValue()); + } + + @Override + protected String visitDecimalLiteral(DecimalLiteral node, Boolean unmangleNames) { + return "DECIMAL '" + node.getValue() + "'"; + } + + @Override + protected String visitGenericLiteral(GenericLiteral node, Boolean unmangleNames) { + return node.getType() + " " + formatStringLiteral(node.getValue()); + } + + @Override + protected String visitTimeLiteral(TimeLiteral node, Boolean unmangleNames) { + return "TIME '" + node.getValue() + "'"; + } + + @Override + protected String visitTimestampLiteral(TimestampLiteral node, Boolean unmangleNames) { + return "TIMESTAMP '" + node.getValue() + "'"; + } + + @Override + protected String visitNullLiteral(NullLiteral node, Boolean unmangleNames) { + return "null"; + } + + @Override + protected String visitIntervalLiteral(IntervalLiteral node, Boolean unmangleNames) { + String sign = (node.getSign() == IntervalLiteral.Sign.NEGATIVE) ? "- " : ""; + StringBuilder builder = new StringBuilder() + .append("INTERVAL ") + .append(sign) + .append(" '").append(node.getValue()).append("' ") + .append(node.getStartField()); + + if (node.getEndField().isPresent()) { + builder.append(" TO ").append(node.getEndField().get()); + } + return builder.toString(); + } + + @Override + protected String visitSubqueryExpression(SubqueryExpression node, Boolean unmangleNames) { + return "(" + SqlFormatter.formatSql(node.getQuery(), unmangleNames) + ")"; + } + + @Override + protected String visitExists(ExistsPredicate node, Boolean unmangleNames) { + return "(EXISTS (" + SqlFormatter.formatSql(node.getSubquery(), unmangleNames) + "))"; + } + + @Override + protected String visitQualifiedNameReference(QualifiedNameReference node, + Boolean unmangleNames) { + return formatQualifiedName(node.getName()); + } + + @Override + protected String visitSymbolReference(SymbolReference node, Boolean context) { + return formatIdentifier(node.getName()); + } + + @Override + protected String visitDereferenceExpression(DereferenceExpression node, Boolean unmangleNames) { + String baseString = process(node.getBase(), unmangleNames); + return baseString + "." 
+ formatIdentifier(node.getFieldName()); + } + + private static String formatQualifiedName(QualifiedName name) { + List parts = new ArrayList<>(); + for (String part : name.getParts()) { + parts.add(formatIdentifier(part)); + } + return Joiner.on('.').join(parts); + } + + @Override + public String visitFieldReference(FieldReference node, Boolean unmangleNames) { + // add colon so this won't parse + return ":input(" + node.getFieldIndex() + ")"; + } + + @Override + protected String visitFunctionCall(FunctionCall node, Boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + + String arguments = joinExpressions(node.getArguments(), unmangleNames); + if (node.getArguments().isEmpty() && "COUNT".equals(node.getName().getSuffix())) { + arguments = "*"; + } + if (node.isDistinct()) { + arguments = "DISTINCT " + arguments; + } + + builder.append(formatQualifiedName(node.getName())) + .append('(').append(arguments).append(')'); + + if (node.getWindow().isPresent()) { + builder.append(" OVER ").append(visitWindow(node.getWindow().get(), unmangleNames)); + } + + return builder.toString(); + } + + @Override + protected String visitLambdaExpression(LambdaExpression node, Boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + + builder.append('('); + Joiner.on(", ").appendTo(builder, node.getArguments()); + builder.append(") -> "); + builder.append(process(node.getBody(), unmangleNames)); + return builder.toString(); + } + + @Override + protected String visitLogicalBinaryExpression(LogicalBinaryExpression node, + Boolean unmangleNames) { + return formatBinaryExpression(node.getType().toString(), node.getLeft(), node.getRight(), + unmangleNames); + } + + @Override + protected String visitNotExpression(NotExpression node, Boolean unmangleNames) { + return "(NOT " + process(node.getValue(), unmangleNames) + ")"; + } + + @Override + protected String visitComparisonExpression(ComparisonExpression node, Boolean unmangleNames) { + return formatBinaryExpression(node.getType().getValue(), node.getLeft(), node.getRight(), + unmangleNames); + } + + @Override + protected String visitIsNullPredicate(IsNullPredicate node, Boolean unmangleNames) { + return "(" + process(node.getValue(), unmangleNames) + " IS NULL)"; + } + + @Override + protected String visitIsNotNullPredicate(IsNotNullPredicate node, Boolean unmangleNames) { + return "(" + process(node.getValue(), unmangleNames) + " IS NOT NULL)"; + } + + @Override + protected String visitNullIfExpression(NullIfExpression node, Boolean unmangleNames) { + return "NULLIF(" + process(node.getFirst(), unmangleNames) + ", " + process(node.getSecond(), + unmangleNames) + + ')'; + } + + @Override + protected String visitArithmeticUnary(ArithmeticUnaryExpression node, Boolean unmangleNames) { + String value = process(node.getValue(), unmangleNames); + + switch (node.getSign()) { + case MINUS: + // this is to avoid turning a sequence of "-" into a comment (i.e., "-- comment") + String separator = value.startsWith("-") ? 
" " : ""; + return "-" + separator + value; + case PLUS: + return "+" + value; + default: + throw new UnsupportedOperationException("Unsupported sign: " + node.getSign()); + } + } + + @Override + protected String visitArithmeticBinary(ArithmeticBinaryExpression node, Boolean unmangleNames) { + return formatBinaryExpression(node.getType().getValue(), node.getLeft(), node.getRight(), + unmangleNames); + } + + @Override + protected String visitLikePredicate(LikePredicate node, Boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + + builder.append('(') + .append(process(node.getValue(), unmangleNames)) + .append(" LIKE ") + .append(process(node.getPattern(), unmangleNames)); + + if (node.getEscape() != null) { + builder.append(" ESCAPE ") + .append(process(node.getEscape(), unmangleNames)); + } + + builder.append(')'); + + return builder.toString(); + } + + @Override + protected String visitAllColumns(AllColumns node, Boolean unmangleNames) { + if (node.getPrefix().isPresent()) { + return node.getPrefix().get() + ".*"; + } + + return "*"; + } + + @Override + public String visitCast(Cast node, Boolean unmangleNames) { + return (node.isSafe() ? "TRY_CAST" : "CAST") + + "(" + process(node.getExpression(), unmangleNames) + " AS " + node.getType() + ")"; + } + + @Override + protected String visitSearchedCaseExpression(SearchedCaseExpression node, + Boolean unmangleNames) { + ImmutableList.Builder parts = ImmutableList.builder(); + parts.add("CASE"); + for (WhenClause whenClause : node.getWhenClauses()) { + parts.add(process(whenClause, unmangleNames)); + } + + node.getDefaultValue() + .ifPresent((value) -> parts.add("ELSE").add(process(value, unmangleNames))); + + parts.add("END"); + + return "(" + Joiner.on(' ').join(parts.build()) + ")"; + } + + @Override + protected String visitSimpleCaseExpression(SimpleCaseExpression node, Boolean unmangleNames) { + ImmutableList.Builder parts = ImmutableList.builder(); + + parts.add("CASE") + .add(process(node.getOperand(), unmangleNames)); + + for (WhenClause whenClause : node.getWhenClauses()) { + parts.add(process(whenClause, unmangleNames)); + } + + node.getDefaultValue() + .ifPresent((value) -> parts.add("ELSE").add(process(value, unmangleNames))); + + parts.add("END"); + + return "(" + Joiner.on(' ').join(parts.build()) + ")"; + } + + @Override + protected String visitWhenClause(WhenClause node, Boolean unmangleNames) { + return "WHEN " + process(node.getOperand(), unmangleNames) + " THEN " + process( + node.getResult(), unmangleNames); + } + + @Override + protected String visitBetweenPredicate(BetweenPredicate node, Boolean unmangleNames) { + return "(" + process(node.getValue(), unmangleNames) + " BETWEEN " + + process(node.getMin(), unmangleNames) + " AND " + + process(node.getMax(), unmangleNames) + ")"; + } + + @Override + protected String visitInPredicate(InPredicate node, Boolean unmangleNames) { + return "(" + process(node.getValue(), unmangleNames) + " IN " + process(node.getValueList(), + unmangleNames) + ")"; + } + + @Override + protected String visitInListExpression(InListExpression node, Boolean unmangleNames) { + return "(" + joinExpressions(node.getValues(), unmangleNames) + ")"; + } + + @Override + public String visitWindow(Window node, Boolean unmangleNames) { + return node.toString(); + } + + @Override + public String visitWindowFrame(WindowFrame node, Boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + + builder.append(node.getType().toString()).append(' '); + + if (node.getEnd().isPresent()) 
+    @Override
+    public String visitWindowFrame(WindowFrame node, Boolean unmangleNames) {
+      StringBuilder builder = new StringBuilder();
+
+      builder.append(node.getType().toString()).append(' ');
+
+      if (node.getEnd().isPresent()) {
+        builder.append("BETWEEN ")
+            .append(process(node.getStart(), unmangleNames))
+            .append(" AND ")
+            .append(process(node.getEnd().get(), unmangleNames));
+      } else {
+        builder.append(process(node.getStart(), unmangleNames));
+      }
+
+      return builder.toString();
+    }
+
+    @Override
+    public String visitFrameBound(FrameBound node, Boolean unmangleNames) {
+      switch (node.getType()) {
+        case UNBOUNDED_PRECEDING:
+          return "UNBOUNDED PRECEDING";
+        case PRECEDING:
+          return process(node.getValue().get(), unmangleNames) + " PRECEDING";
+        case CURRENT_ROW:
+          return "CURRENT ROW";
+        case FOLLOWING:
+          return process(node.getValue().get(), unmangleNames) + " FOLLOWING";
+        case UNBOUNDED_FOLLOWING:
+          return "UNBOUNDED FOLLOWING";
+        default:
+          throw new IllegalArgumentException("unhandled type: " + node.getType());
+      }
+    }
+
+    private String formatBinaryExpression(String operator, Expression left, Expression right,
+                                          boolean unmangleNames) {
+      return '(' + process(left, unmangleNames) + ' ' + operator + ' '
+          + process(right, unmangleNames) + ')';
+    }
+
+    private String joinExpressions(List<Expression> expressions, boolean unmangleNames) {
+      return Joiner.on(", ").join(expressions.stream()
+          .map((e) -> process(e, unmangleNames))
+          .iterator());
+    }
+
+    private static String formatIdentifier(String s) {
+      // TODO: handle escaping properly
+      return s;
+    }
+  }
+
+  static String formatStringLiteral(String s) {
+    return "'" + s.replace("'", "''") + "'";
+  }
+
+  static String formatSortItems(List<SortItem> sortItems) {
+    return formatSortItems(sortItems, true);
+  }
+
+  static String formatSortItems(List<SortItem> sortItems, boolean unmangleNames) {
+    return Joiner.on(", ").join(sortItems.stream()
+        .map(sortItemFormatterFunction(unmangleNames))
+        .iterator());
+  }
+
+  static String formatGroupBy(List<GroupingElement> groupingElements) {
+    ImmutableList.Builder<String> resultStrings = ImmutableList.builder();
+
+    for (GroupingElement groupingElement : groupingElements) {
+      String result = "";
+      if (groupingElement instanceof SimpleGroupBy) {
+        Set<Expression> columns =
+            ImmutableSet.copyOf(((SimpleGroupBy) groupingElement).getColumnExpressions());
+        if (columns.size() == 1) {
+          result = formatExpression(getOnlyElement(columns));
+        } else {
+          result = formatGroupingSet(columns);
+        }
+      } else if (groupingElement instanceof GroupingSets) {
+        result = format("GROUPING SETS (%s)", Joiner.on(", ").join(
+            groupingElement.enumerateGroupingSets().stream()
+                .map(ExpressionFormatterQueryRewrite::formatGroupingSet)
+                .iterator()));
+      }
+      resultStrings.add(result);
+    }
+    return Joiner.on(", ").join(resultStrings.build());
+  }
+
+  private static String formatGroupingSet(Set<Expression> groupingSet) {
+    return format("(%s)", Joiner.on(", ")
+        .join(groupingSet
+            .stream().map(ExpressionFormatterQueryRewrite::formatExpression)
+            .iterator()));
+  }
+
+  private static String formatGroupingSet(List<?> groupingSet) {
+    return format("(%s)", Joiner.on(", ").join(groupingSet));
+  }
+
+  private static Function<SortItem, String> sortItemFormatterFunction(boolean unmangleNames) {
+    return input -> {
+      StringBuilder builder = new StringBuilder();
+
+      builder.append(formatExpression(input.getSortKey(), unmangleNames));
+
+      switch (input.getOrdering()) {
+        case ASCENDING:
+          builder.append(" ASC");
+          break;
+        case DESCENDING:
+          builder.append(" DESC");
+          break;
+        default:
+          throw new UnsupportedOperationException("unknown ordering: " + input.getOrdering());
+      }
+
+      switch (input.getNullOrdering()) {
+        case FIRST:
+          builder.append(" NULLS FIRST");
+          break;
+        case LAST:
+          builder.append(" NULLS LAST");
+          break;
+        case UNDEFINED:
+          // 
no op + break; + default: + throw new UnsupportedOperationException( + "unknown null ordering: " + input.getNullOrdering()); + } + + return builder.toString(); + }; + } +} \ No newline at end of file diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/SqlFormatterQueryRewrite.java b/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/SqlFormatterQueryRewrite.java new file mode 100644 index 000000000000..634ce266be1a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/rewrite/SqlFormatterQueryRewrite.java @@ -0,0 +1,724 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.rewrite; + +import com.google.common.base.Joiner; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableSortedMap; +import io.confluent.ksql.parser.tree.AliasedRelation; +import io.confluent.ksql.parser.tree.AllColumns; +import io.confluent.ksql.parser.tree.AstVisitor; +import io.confluent.ksql.parser.tree.CreateTable; +import io.confluent.ksql.parser.tree.CreateTableAsSelect; +import io.confluent.ksql.parser.tree.CreateView; +import io.confluent.ksql.parser.tree.Delete; +import io.confluent.ksql.parser.tree.DropTable; +import io.confluent.ksql.parser.tree.DropView; +import io.confluent.ksql.parser.tree.Except; +import io.confluent.ksql.parser.tree.Explain; +import io.confluent.ksql.parser.tree.ExplainFormat; +import io.confluent.ksql.parser.tree.ExplainOption; +import io.confluent.ksql.parser.tree.ExplainType; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.Intersect; +import io.confluent.ksql.parser.tree.Join; +import io.confluent.ksql.parser.tree.JoinCriteria; +import io.confluent.ksql.parser.tree.JoinOn; +import io.confluent.ksql.parser.tree.JoinUsing; +import io.confluent.ksql.parser.tree.NaturalJoin; +import io.confluent.ksql.parser.tree.Node; +import io.confluent.ksql.parser.tree.QualifiedName; +import io.confluent.ksql.parser.tree.Query; +import io.confluent.ksql.parser.tree.QuerySpecification; +import io.confluent.ksql.parser.tree.Relation; +import io.confluent.ksql.parser.tree.RenameColumn; +import io.confluent.ksql.parser.tree.RenameTable; +import io.confluent.ksql.parser.tree.Row; +import io.confluent.ksql.parser.tree.SampledRelation; +import io.confluent.ksql.parser.tree.Select; +import io.confluent.ksql.parser.tree.SelectItem; +import io.confluent.ksql.parser.tree.SetSession; +import io.confluent.ksql.parser.tree.ShowCatalogs; +import io.confluent.ksql.parser.tree.ShowColumns; +import io.confluent.ksql.parser.tree.ShowCreate; +import io.confluent.ksql.parser.tree.ShowFunctions; +import io.confluent.ksql.parser.tree.ShowPartitions; +import io.confluent.ksql.parser.tree.ShowSchemas; +import io.confluent.ksql.parser.tree.ShowSession; +import io.confluent.ksql.parser.tree.SingleColumn; +import io.confluent.ksql.parser.tree.Table; +import io.confluent.ksql.parser.tree.TableSubquery; +import io.confluent.ksql.parser.tree.Union; +import io.confluent.ksql.parser.tree.Values; +import io.confluent.ksql.parser.tree.With; +import io.confluent.ksql.parser.tree.WithQuery; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.regex.Pattern; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.collect.Iterables.getOnlyElement; +import static com.google.common.collect.Iterables.transform; +import static java.util.stream.Collectors.joining; + +public final class SqlFormatterQueryRewrite { + + 
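+  // Variant of SqlFormatter used on the query-rewrite path: unlike SqlFormatter,
+  // it also renders the INTO clause of a query specification and writes an
+  // explicit "AS" between a column and its alias.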
private static final String INDENT = " "; + private static final Pattern NAME_PATTERN = Pattern.compile("[a-z_][a-z0-9_]*"); + + private boolean isJoin = false; + + private SqlFormatterQueryRewrite() { + } + + public static String formatSql(Node root) { + StringBuilder builder = new StringBuilder(); + new Formatter(builder, true).process(root, 0); + return builder.toString(); + } + + public static String formatSql(Node root, boolean unmangleNames) { + StringBuilder builder = new StringBuilder(); + new Formatter(builder, unmangleNames).process(root, 0); + return builder.toString(); + } + + private static class Formatter + extends AstVisitor { + + private final StringBuilder builder; + private final boolean unmangledNames; + + public Formatter(StringBuilder builder, boolean unmangleNames) { + this.builder = builder; + this.unmangledNames = unmangleNames; + } + + @Override + protected Void visitNode(Node node, Integer indent) { + throw new UnsupportedOperationException("not yet implemented: " + node); + } + + @Override + protected Void visitExpression(Expression node, Integer indent) { + checkArgument(indent == 0, + "visitExpression should only be called at root"); + builder.append(ExpressionFormatterQueryRewrite.formatExpression(node, unmangledNames)); + return null; + } + + @Override + protected Void visitQuery(Query node, Integer indent) { + if (node.getWith().isPresent()) { + With with = node.getWith().get(); + append(indent, "WITH"); + if (with.isRecursive()) { + builder.append(" RECURSIVE"); + } + builder.append("\n "); + Iterator queries = with.getQueries().iterator(); + while (queries.hasNext()) { + WithQuery query = queries.next(); + append(indent, query.getName()); + query.getColumnNames().ifPresent(columnNames -> appendAliasColumns(builder, columnNames)); + builder.append(" AS "); + process(new TableSubquery(query.getQuery()), indent); + builder.append('\n'); + if (queries.hasNext()) { + builder.append(", "); + } + } + } + + processRelation(node.getQueryBody(), indent); + + if (!node.getOrderBy().isEmpty()) { + append(indent, + "ORDER BY " + + ExpressionFormatterQueryRewrite.formatSortItems(node.getOrderBy())) + .append('\n'); + } + + if (node.getLimit().isPresent()) { + append(indent, "LIMIT " + node.getLimit().get()) + .append('\n'); + } + return null; + } + + @Override + protected Void visitQuerySpecification(QuerySpecification node, Integer indent) { + process(node.getSelect(), indent); + + if (node.getInto().isPresent()) { + append(indent, "INTO"); + builder.append('\n'); + append(indent, " "); + process(node.getInto().get(), indent); + builder.append('\n'); + } + + if (node.getFrom().isPresent()) { + append(indent, "FROM"); + builder.append('\n'); + append(indent, " "); + process(node.getFrom().get(), indent); + } + + builder.append('\n'); + + if (node.getWhere().isPresent()) { + append(indent, + "WHERE " + + ExpressionFormatterQueryRewrite.formatExpression(node.getWhere().get())) + .append('\n'); + } + + if (node.getGroupBy().isPresent()) { + append(indent, "GROUP BY " + + (node.getGroupBy().get().isDistinct() ? 
" DISTINCT " : "") + + ExpressionFormatterQueryRewrite + .formatGroupBy(node.getGroupBy().get().getGroupingElements())) + .append('\n'); + } + + if (node.getHaving().isPresent()) { + append(indent, + "HAVING " + + ExpressionFormatterQueryRewrite.formatExpression(node.getHaving().get())) + .append('\n'); + } + + if (!node.getOrderBy().isEmpty()) { + append(indent, + "ORDER BY " + + ExpressionFormatterQueryRewrite.formatSortItems(node.getOrderBy())) + .append('\n'); + } + + if (node.getLimit().isPresent()) { + append(indent, "LIMIT " + node.getLimit().get()) + .append('\n'); + } + return null; + } + + @Override + protected Void visitSelect(Select node, Integer indent) { + append(indent, "SELECT"); + if (node.isDistinct()) { + builder.append(" DISTINCT"); + } + + if (node.getSelectItems().size() > 1) { + boolean first = true; + for (SelectItem item : node.getSelectItems()) { + builder.append("\n") + .append(indentString(indent)) + .append(first ? " " : ", "); + + process(item, indent); + first = false; + } + } else { + builder.append(' '); + process(getOnlyElement(node.getSelectItems()), indent); + } + + builder.append('\n'); + + return null; + } + + @Override + protected Void visitSingleColumn(SingleColumn node, Integer indent) { + builder.append(ExpressionFormatterQueryRewrite.formatExpression(node.getExpression())); + if (node.getAlias().isPresent()) { + builder.append(' ') + .append(" AS ") + .append(node.getAlias().get()); + // TODO: handle quoting properly + } + + return null; + } + + @Override + protected Void visitAllColumns(AllColumns node, Integer context) { + builder.append(node.toString()); + + return null; + } + + @Override + protected Void visitTable(Table node, Integer indent) { + builder.append(node.getName().toString()); + return null; + } + + @Override + protected Void visitJoin(Join node, Integer indent) { + JoinCriteria criteria = node.getCriteria().orElse(null); + String type = node.getType().toString(); + if (criteria instanceof NaturalJoin) { + type = "NATURAL " + type; + } + + process(node.getLeft(), indent); + + builder.append('\n'); + if (node.getType() == Join.Type.IMPLICIT) { + append(indent, ", "); + } else { + append(indent, type).append(" JOIN "); + } + + process(node.getRight(), indent); + + if (node.getType() != Join.Type.CROSS && node.getType() != Join.Type.IMPLICIT) { + if (criteria instanceof JoinUsing) { + JoinUsing using = (JoinUsing) criteria; + builder.append(" USING (") + .append(Joiner.on(", ").join(using.getColumns())) + .append(")"); + } else if (criteria instanceof JoinOn) { + JoinOn on = (JoinOn) criteria; + builder.append(" ON (") + .append(ExpressionFormatterQueryRewrite.formatExpression(on.getExpression())) + .append(")"); + } else if (!(criteria instanceof NaturalJoin)) { + throw new UnsupportedOperationException("unknown join criteria: " + criteria); + } + } + + return null; + } + + @Override + protected Void visitAliasedRelation(AliasedRelation node, Integer indent) { + process(node.getRelation(), indent); + + builder.append(' ') + .append(node.getAlias()); + + appendAliasColumns(builder, node.getColumnNames()); + + return null; + } + + @Override + protected Void visitSampledRelation(SampledRelation node, Integer indent) { + process(node.getRelation(), indent); + + builder.append(" TABLESAMPLE ") + .append(node.getType()) + .append(" (") + .append(node.getSamplePercentage()) + .append(')'); + + if (node.getColumnsToStratifyOn().isPresent()) { + builder.append(" STRATIFY ON ") + .append(" (") + 
.append(Joiner.on(",").join(node.getColumnsToStratifyOn().get())); + builder.append(')'); + } + + return null; + } + + @Override + protected Void visitValues(Values node, Integer indent) { + builder.append(" VALUES "); + + boolean first = true; + for (Expression row : node.getRows()) { + builder.append("\n") + .append(indentString(indent)) + .append(first ? " " : ", "); + + builder.append(ExpressionFormatterQueryRewrite.formatExpression(row)); + first = false; + } + builder.append('\n'); + + return null; + } + + @Override + protected Void visitTableSubquery(TableSubquery node, Integer indent) { + builder.append('(') + .append('\n'); + + process(node.getQuery(), indent + 1); + + append(indent, ") "); + + return null; + } + + @Override + protected Void visitUnion(Union node, Integer indent) { + Iterator relations = node.getRelations().iterator(); + + while (relations.hasNext()) { + processRelation(relations.next(), indent); + + if (relations.hasNext()) { + builder.append("UNION "); + if (!node.isDistinct()) { + builder.append("ALL "); + } + } + } + + return null; + } + + @Override + protected Void visitExcept(Except node, Integer indent) { + processRelation(node.getLeft(), indent); + + builder.append("EXCEPT "); + if (!node.isDistinct()) { + builder.append("ALL "); + } + + processRelation(node.getRight(), indent); + + return null; + } + + @Override + protected Void visitIntersect(Intersect node, Integer indent) { + Iterator relations = node.getRelations().iterator(); + + while (relations.hasNext()) { + processRelation(relations.next(), indent); + + if (relations.hasNext()) { + builder.append("INTERSECT "); + if (!node.isDistinct()) { + builder.append("ALL "); + } + } + } + + return null; + } + + @Override + protected Void visitCreateView(CreateView node, Integer indent) { + builder.append("CREATE "); + if (node.isReplace()) { + builder.append("OR REPLACE "); + } + builder.append("VIEW ") + .append(node.getName()) + .append(" AS\n"); + + process(node.getQuery(), indent); + + return null; + } + + @Override + protected Void visitDropView(DropView node, Integer context) { + builder.append("DROP VIEW "); + if (node.isExists()) { + builder.append("IF EXISTS "); + } + builder.append(node.getName()); + + return null; + } + + @Override + protected Void visitExplain(Explain node, Integer indent) { + builder.append("EXPLAIN "); + if (node.isAnalyze()) { + builder.append("ANALYZE "); + } + + List options = new ArrayList<>(); + + for (ExplainOption option : node.getOptions()) { + if (option instanceof ExplainType) { + options.add("TYPE " + ((ExplainType) option).getType()); + } else if (option instanceof ExplainFormat) { + options.add("FORMAT " + ((ExplainFormat) option).getType()); + } else { + throw new UnsupportedOperationException("unhandled explain option: " + option); + } + } + + if (!options.isEmpty()) { + builder.append("("); + Joiner.on(", ").appendTo(builder, options); + builder.append(")"); + } + + builder.append("\n"); + + process(node.getStatement(), indent); + + return null; + } + + @Override + protected Void visitShowCatalogs(ShowCatalogs node, Integer context) { + builder.append("SHOW CATALOGS"); + + node.getLikePattern().ifPresent((value) -> + builder.append(" LIKE ") + .append(ExpressionFormatterQueryRewrite + .formatStringLiteral(value))); + + return null; + } + + @Override + protected Void visitShowSchemas(ShowSchemas node, Integer context) { + builder.append("SHOW SCHEMAS"); + + if (node.getCatalog().isPresent()) { + builder.append(" FROM ") + .append(node.getCatalog().get()); + } + 
+ node.getLikePattern().ifPresent((value) -> + builder.append(" LIKE ") + .append(ExpressionFormatterQueryRewrite + .formatStringLiteral(value))); + + return null; + } + + @Override + protected Void visitShowCreate(ShowCreate node, Integer context) { + if (node.getType() == ShowCreate.Type.TABLE) { + builder.append("SHOW CREATE TABLE ") + .append(formatName(node.getName())); + } else if (node.getType() == ShowCreate.Type.VIEW) { + builder.append("SHOW CREATE VIEW ") + .append(formatName(node.getName())); + } + + return null; + } + + @Override + protected Void visitShowColumns(ShowColumns node, Integer context) { + builder.append("SHOW COLUMNS FROM ") + .append(node.getTable()); + + return null; + } + + @Override + protected Void visitShowPartitions(ShowPartitions node, Integer context) { + builder.append("SHOW PARTITIONS FROM ") + .append(node.getTable()); + + if (node.getWhere().isPresent()) { + builder.append(" WHERE ") + .append(ExpressionFormatterQueryRewrite.formatExpression(node.getWhere().get())); + } + + if (!node.getOrderBy().isEmpty()) { + builder.append(" ORDER BY ") + .append(ExpressionFormatterQueryRewrite.formatSortItems(node.getOrderBy())); + } + + if (node.getLimit().isPresent()) { + builder.append(" LIMIT ") + .append(node.getLimit().get()); + } + + return null; + } + + @Override + protected Void visitShowFunctions(ShowFunctions node, Integer context) { + builder.append("SHOW FUNCTIONS"); + + return null; + } + + @Override + protected Void visitShowSession(ShowSession node, Integer context) { + builder.append("SHOW SESSION"); + + return null; + } + + @Override + protected Void visitDelete(Delete node, Integer context) { + builder.append("DELETE FROM ") + .append(node.getTable().getName()); + + if (node.getWhere().isPresent()) { + builder.append(" WHERE ") + .append(ExpressionFormatterQueryRewrite.formatExpression(node.getWhere().get())); + } + + return null; + } + + @Override + protected Void visitCreateTableAsSelect(CreateTableAsSelect node, Integer indent) { + builder.append("CREATE TABLE "); + if (node.isNotExists()) { + builder.append("IF NOT EXISTS "); + } + builder.append(node.getName()); + + if (!node.getProperties().isEmpty()) { + builder.append(" WITH ("); + Joiner.on(", ").appendTo(builder, + transform(node.getProperties().entrySet(), + entry -> entry.getKey() + " = " + + ExpressionFormatterQueryRewrite + .formatExpression(entry.getValue()))); + builder.append(")"); + } + + builder.append(" AS "); + process(node.getQuery(), indent); + + return null; + } + + @Override + protected Void visitCreateTable(CreateTable node, Integer indent) { + builder.append("CREATE TABLE "); + if (node.isNotExists()) { + builder.append("IF NOT EXISTS "); + } + String tableName = formatName(node.getName()); + builder.append(tableName).append(" (\n"); + + String columnList = node.getElements().stream() + .map(column -> INDENT + formatName(column.getName()) + " " + column.getType()) + .collect(joining(",\n")); + builder.append(columnList); + builder.append("\n").append(")"); + + if (!node.getProperties().isEmpty()) { + builder.append("\nWITH (\n"); + // Always output the table properties in sorted order + String propertyList = ImmutableSortedMap.copyOf(node.getProperties()).entrySet().stream() + .map(entry -> INDENT + formatName(entry.getKey()) + " = " + entry.getValue()) + .collect(joining(",\n")); + builder.append(propertyList); + builder.append("\n").append(")"); + } + + return null; + } + + private static String formatName(String name) { + if (NAME_PATTERN.matcher(name).matches()) { 
+ return name; + } + return "\"" + name + "\""; + } + + private static String formatName(QualifiedName name) { + return name.getParts().stream() + .map(Formatter::formatName) + .collect(joining(".")); + } + + @Override + protected Void visitDropTable(DropTable node, Integer context) { + builder.append("DROP TABLE "); + if (node.isExists()) { + builder.append("IF EXISTS "); + } + builder.append(node.getName()); + + return null; + } + + @Override + protected Void visitRenameTable(RenameTable node, Integer context) { + builder.append("ALTER TABLE ") + .append(node.getSource()) + .append(" RENAME TO ") + .append(node.getTarget()); + + return null; + } + + @Override + protected Void visitRenameColumn(RenameColumn node, Integer context) { + builder.append("ALTER TABLE ") + .append(node.getTable()) + .append(" RENAME COLUMN ") + .append(node.getSource()) + .append(" TO ") + .append(node.getTarget()); + + return null; + } + + + + @Override + public Void visitSetSession(SetSession node, Integer context) { + builder.append("SET SESSION ") + .append(node.getName()) + .append(" = ") + .append(ExpressionFormatterQueryRewrite.formatExpression(node.getValue())); + + return null; + } + + @Override + protected Void visitRow(Row node, Integer indent) { + builder.append("ROW("); + boolean firstItem = true; + for (Expression item : node.getItems()) { + if (!firstItem) { + builder.append(", "); + } + process(item, indent); + firstItem = false; + } + builder.append(")"); + return null; + } + + private void processRelation(Relation relation, Integer indent) { + // TODO: handle this properly + if (relation instanceof Table) { + builder.append("TABLE ") + .append(((Table) relation).getName()) + .append('\n'); + } else { + process(relation, indent); + } + } + + private StringBuilder append(int indent, String value) { + return builder.append(indentString(indent)) + .append(value); + } + + private static String indentString(int indent) { + return Strings.repeat(INDENT, indent); + } + } + + private static void appendAliasColumns(StringBuilder builder, List columns) { + if ((columns != null) && (!columns.isEmpty())) { + builder.append(" ("); + Joiner.on(", ").appendTo(builder, columns); + builder.append(')'); + } + } +} \ No newline at end of file diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AbstractStreamCreateStatement.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AbstractStreamCreateStatement.java new file mode 100644 index 000000000000..c833eadbae40 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AbstractStreamCreateStatement.java @@ -0,0 +1,21 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.List; +import java.util.Map; +import java.util.Optional; + +public abstract class AbstractStreamCreateStatement extends Statement { + public AbstractStreamCreateStatement(Optional location) { + super(location); + } + + public abstract Map getProperties(); + + public abstract QualifiedName getName(); + + public abstract List getElements(); +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AbstractStreamDropStatement.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AbstractStreamDropStatement.java new file mode 100644 index 000000000000..a4ac6d707a2d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AbstractStreamDropStatement.java @@ -0,0 +1,15 @@ +/** + * Copyright 2017 Confluent Inc. 
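+ *
+ * <p>Intended base class for the parse nodes that drop a structured data
+ * source (DROP STREAM / DROP TABLE style statements); subclasses expose the
+ * dropped source's name via {@code getName()}.</p>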
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Optional;
+
+public abstract class AbstractStreamDropStatement extends Statement {
+  public AbstractStreamDropStatement(Optional<NodeLocation> location) {
+    super(location);
+  }
+
+  public abstract QualifiedName getName();
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AliasedRelation.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AliasedRelation.java
new file mode 100644
index 000000000000..8ba68e922f78
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AliasedRelation.java
@@ -0,0 +1,88 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class AliasedRelation
+    extends Relation {
+
+  private final Relation relation;
+  private final String alias;
+  private final List<String> columnNames;
+
+  public AliasedRelation(Relation relation, String alias, List<String> columnNames) {
+    this(Optional.empty(), relation, alias, columnNames);
+  }
+
+  public AliasedRelation(NodeLocation location, Relation relation, String alias,
+                         List<String> columnNames) {
+    this(Optional.of(location), relation, alias, columnNames);
+  }
+
+  private AliasedRelation(Optional<NodeLocation> location, Relation relation, String alias,
+                          List<String> columnNames) {
+    super(location);
+    requireNonNull(relation, "relation is null");
+    requireNonNull(alias, "alias is null");
+
+    this.relation = relation;
+    this.alias = alias;
+    this.columnNames = columnNames;
+  }
+
+  public Relation getRelation() {
+    return relation;
+  }
+
+  public String getAlias() {
+    return alias;
+  }
+
+  public List<String> getColumnNames() {
+    return columnNames;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitAliasedRelation(this, context);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("relation", relation)
+        .add("alias", alias)
+        .add("columnNames", columnNames)
+        .omitNullValues()
+        .toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    AliasedRelation that = (AliasedRelation) o;
+    return Objects.equals(relation, that.relation)
+        && Objects.equals(alias, that.alias)
+        && Objects.equals(columnNames, that.columnNames);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(relation, alias, columnNames);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AllColumns.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AllColumns.java
new file mode 100644
index 000000000000..91b69b8cbf74
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AllColumns.java
@@ -0,0 +1,77 @@
+/**
+ * Copyright 2017 Confluent Inc.
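+ *
+ * <p>Select item for a bare {@code *} or a qualified {@code prefix.*};
+ * {@code toString()} reproduces the original source form.</p>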
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class AllColumns + extends SelectItem { + + private final Optional prefix; + + public AllColumns() { + super(Optional.empty()); + prefix = Optional.empty(); + } + + public AllColumns(NodeLocation location) { + super(Optional.of(location)); + prefix = Optional.empty(); + } + + public AllColumns(QualifiedName prefix) { + this(Optional.empty(), prefix); + } + + public AllColumns(NodeLocation location, QualifiedName prefix) { + this(Optional.of(location), prefix); + } + + private AllColumns(Optional location, QualifiedName prefix) { + super(location); + requireNonNull(prefix, "prefix is null"); + this.prefix = Optional.of(prefix); + } + + public Optional getPrefix() { + return prefix; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitAllColumns(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AllColumns that = (AllColumns) o; + return Objects.equals(prefix, that.prefix); + } + + @Override + public int hashCode() { + return prefix.hashCode(); + } + + @Override + public String toString() { + if (prefix.isPresent()) { + return prefix.get() + ".*"; + } + + return "*"; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ArithmeticBinaryExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ArithmeticBinaryExpression.java new file mode 100644 index 000000000000..e1b7ff6162bc --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ArithmeticBinaryExpression.java @@ -0,0 +1,88 @@ +/** + * Copyright 2017 Confluent Inc. 
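+ *
+ * <p>Binary arithmetic over two sub-expressions; {@code Type} pairs each
+ * operator (+, -, *, / and %) with its SQL symbol via {@code getValue()}.</p>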
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +public class ArithmeticBinaryExpression + extends Expression { + + public enum Type { + ADD("+"), + SUBTRACT("-"), + MULTIPLY("*"), + DIVIDE("/"), + MODULUS("%"); + private final String value; + + Type(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + } + + private final Type type; + private final Expression left; + private final Expression right; + + public ArithmeticBinaryExpression(Type type, Expression left, Expression right) { + this(Optional.empty(), type, left, right); + } + + public ArithmeticBinaryExpression(NodeLocation location, Type type, Expression left, + Expression right) { + this(Optional.of(location), type, left, right); + } + + private ArithmeticBinaryExpression(Optional location, Type type, Expression left, + Expression right) { + super(location); + this.type = type; + this.left = left; + this.right = right; + } + + public Type getType() { + return type; + } + + public Expression getLeft() { + return left; + } + + public Expression getRight() { + return right; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitArithmeticBinary(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ArithmeticBinaryExpression that = (ArithmeticBinaryExpression) o; + return (type == that.type) + && Objects.equals(left, that.left) + && Objects.equals(right, that.right); + } + + @Override + public int hashCode() { + return Objects.hash(type, left, right); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ArithmeticUnaryExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ArithmeticUnaryExpression.java new file mode 100644 index 000000000000..aa121160bdcd --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ArithmeticUnaryExpression.java @@ -0,0 +1,88 @@ +/** + * Copyright 2017 Confluent Inc. 
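+ *
+ * <p>Unary plus/minus applied to an expression; the static factories
+ * {@code positive(...)} and {@code negative(...)} are the usual way to
+ * build instances.</p>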
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class ArithmeticUnaryExpression + extends Expression { + + public enum Sign { + PLUS, + MINUS + } + + private final Expression value; + private final Sign sign; + + public ArithmeticUnaryExpression(Sign sign, Expression value) { + this(Optional.empty(), sign, value); + } + + public ArithmeticUnaryExpression(NodeLocation location, Sign sign, Expression value) { + this(Optional.of(location), sign, value); + } + + private ArithmeticUnaryExpression(Optional location, Sign sign, Expression value) { + super(location); + requireNonNull(value, "value is null"); + requireNonNull(sign, "sign is null"); + + this.value = value; + this.sign = sign; + } + + public static ArithmeticUnaryExpression positive(NodeLocation location, Expression value) { + return new ArithmeticUnaryExpression(Optional.of(location), Sign.PLUS, value); + } + + public static ArithmeticUnaryExpression negative(NodeLocation location, Expression value) { + return new ArithmeticUnaryExpression(Optional.of(location), Sign.MINUS, value); + } + + public static ArithmeticUnaryExpression positive(Expression value) { + return new ArithmeticUnaryExpression(Optional.empty(), Sign.PLUS, value); + } + + public static ArithmeticUnaryExpression negative(Expression value) { + return new ArithmeticUnaryExpression(Optional.empty(), Sign.MINUS, value); + } + + public Expression getValue() { + return value; + } + + public Sign getSign() { + return sign; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitArithmeticUnary(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ArithmeticUnaryExpression that = (ArithmeticUnaryExpression) o; + return Objects.equals(value, that.value) + && (sign == that.sign); + } + + @Override + public int hashCode() { + return Objects.hash(value, sign); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AstVisitor.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AstVisitor.java new file mode 100644 index 000000000000..41edfe4b87ca --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/AstVisitor.java @@ -0,0 +1,397 @@ +/** + * Copyright 2017 Confluent Inc. 
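+ *
+ * <p>Generic visitor over the KSQL AST. Dispatch is two-step:
+ * {@code process(node, context)} calls {@code node.accept(this, context)},
+ * and each {@code visitX} default delegates to the handler for the node's
+ * supertype, so a subclass only overrides the level of the hierarchy it
+ * cares about. A minimal, hypothetical example ({@code expression} is an
+ * assumed pre-parsed node):</p>
+ *
+ * <pre>{@code
+ * // returns a tag for any literal, and null for everything else
+ * AstVisitor<String, Void> tagger = new AstVisitor<String, Void>() {
+ *   @Override
+ *   protected String visitLiteral(Literal node, Void context) {
+ *     return "literal"; // reached by every Literal subtype via delegation
+ *   }
+ * };
+ * String tag = tagger.process(expression, null);
+ * }</pre>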
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import javax.annotation.Nullable; + +public abstract class AstVisitor { + + public R process(Node node, @Nullable C context) { + return node.accept(this, context); + } + + protected R visitNode(Node node, C context) { + return null; + } + + protected R visitExpression(Expression node, C context) { + return visitNode(node, context); + } + + + protected R visitExtract(Extract node, C context) { + return visitExpression(node, context); + } + + protected R visitArithmeticBinary(ArithmeticBinaryExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitBetweenPredicate(BetweenPredicate node, C context) { + return visitExpression(node, context); + } + + + protected R visitComparisonExpression(ComparisonExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitLiteral(Literal node, C context) { + return visitExpression(node, context); + } + + protected R visitDoubleLiteral(DoubleLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitDecimalLiteral(DecimalLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitStatements(Statements node, C context) { + return visitNode(node, context); + } + + protected R visitStatement(Statement node, C context) { + return visitNode(node, context); + } + + protected R visitQuery(Query node, C context) { + return visitStatement(node, context); + } + + protected R visitExplain(Explain node, C context) { + return visitStatement(node, context); + } + + protected R visitShowSchemas(ShowSchemas node, C context) { + return visitStatement(node, context); + } + + protected R visitShowCatalogs(ShowCatalogs node, C context) { + return visitStatement(node, context); + } + + protected R visitShowColumns(ShowColumns node, C context) { + return visitStatement(node, context); + } + + protected R visitShowPartitions(ShowPartitions node, C context) { + return visitStatement(node, context); + } + + protected R visitShowCreate(ShowCreate node, C context) { + return visitStatement(node, context); + } + + protected R visitShowFunctions(ShowFunctions node, C context) { + return visitStatement(node, context); + } + + protected R visitShowSession(ShowSession node, C context) { + return visitStatement(node, context); + } + + protected R visitSetSession(SetSession node, C context) { + return visitStatement(node, context); + } + + protected R visitGenericLiteral(GenericLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitTimeLiteral(TimeLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitExplainOption(ExplainOption node, C context) { + return visitNode(node, context); + } + + protected R visitWith(With node, C context) { + return visitNode(node, context); + } + + protected R visitWithQuery(WithQuery node, C context) { + return visitNode(node, context); + } + + protected R visitSelect(Select node, C context) { + return visitNode(node, context); + } + + protected R visitRelation(Relation node, C context) { + return visitNode(node, context); + } + + protected R visitQueryBody(QueryBody node, C context) { + return visitRelation(node, context); + } + + protected R visitQuerySpecification(QuerySpecification node, C context) { + return visitQueryBody(node, context); + } + + protected R visitSetOperation(SetOperation node, C context) { + return visitQueryBody(node, context); + } + + protected R visitUnion(Union node, C context) { 
+ return visitSetOperation(node, context); + } + + protected R visitIntersect(Intersect node, C context) { + return visitSetOperation(node, context); + } + + protected R visitExcept(Except node, C context) { + return visitSetOperation(node, context); + } + + protected R visitTimestampLiteral(TimestampLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitWhenClause(WhenClause node, C context) { + return visitExpression(node, context); + } + + protected R visitIntervalLiteral(IntervalLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitInPredicate(InPredicate node, C context) { + return visitExpression(node, context); + } + + protected R visitFunctionCall(FunctionCall node, C context) { + return visitExpression(node, context); + } + + protected R visitLambdaExpression(LambdaExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitSimpleCaseExpression(SimpleCaseExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitStringLiteral(StringLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitBinaryLiteral(BinaryLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitBooleanLiteral(BooleanLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitInListExpression(InListExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitQualifiedNameReference(QualifiedNameReference node, C context) { + return visitExpression(node, context); + } + + protected R visitDereferenceExpression(DereferenceExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitNullIfExpression(NullIfExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitNullLiteral(NullLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitArithmeticUnary(ArithmeticUnaryExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitNotExpression(NotExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitSelectItem(SelectItem node, C context) { + return visitNode(node, context); + } + + protected R visitSingleColumn(SingleColumn node, C context) { + return visitSelectItem(node, context); + } + + protected R visitAllColumns(AllColumns node, C context) { + return visitSelectItem(node, context); + } + + protected R visitSearchedCaseExpression(SearchedCaseExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitLikePredicate(LikePredicate node, C context) { + return visitExpression(node, context); + } + + protected R visitIsNotNullPredicate(IsNotNullPredicate node, C context) { + return visitExpression(node, context); + } + + protected R visitIsNullPredicate(IsNullPredicate node, C context) { + return visitExpression(node, context); + } + + protected R visitSubscriptExpression(SubscriptExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitLongLiteral(LongLiteral node, C context) { + return visitLiteral(node, context); + } + + protected R visitLogicalBinaryExpression(LogicalBinaryExpression node, C context) { + return visitExpression(node, context); + } + + protected R visitSubqueryExpression(SubqueryExpression node, C context) { + return visitExpression(node, context); + } + + protected R 
visitSortItem(SortItem node, C context) { + return visitNode(node, context); + } + + protected R visitTable(Table node, C context) { + return visitQueryBody(node, context); + } + + protected R visitValues(Values node, C context) { + return visitQueryBody(node, context); + } + + protected R visitRow(Row node, C context) { + return visitNode(node, context); + } + + protected R visitTableSubquery(TableSubquery node, C context) { + return visitQueryBody(node, context); + } + + protected R visitAliasedRelation(AliasedRelation node, C context) { + return visitRelation(node, context); + } + + protected R visitSampledRelation(SampledRelation node, C context) { + return visitRelation(node, context); + } + + protected R visitJoin(Join node, C context) { + return visitRelation(node, context); + } + + protected R visitExists(ExistsPredicate node, C context) { + return visitExpression(node, context); + } + + protected R visitCast(Cast node, C context) { + return visitExpression(node, context); + } + + protected R visitFieldReference(FieldReference node, C context) { + return visitExpression(node, context); + } + + protected R visitWindow(Window node, C context) { + return visitNode(node, context); + } + + protected R visitWindowFrame(WindowFrame node, C context) { + return visitNode(node, context); + } + + protected R visitFrameBound(FrameBound node, C context) { + return visitNode(node, context); + } + + protected R visitTableElement(TableElement node, C context) { + return visitNode(node, context); + } + + protected R visitCreateTopic(RegisterTopic node, C context) { + return visitStatement(node, context); + } + + protected R visitCreateStream(CreateStream node, C context) { + return visitStatement(node, context); + } + + protected R visitCreateStreamAsSelect(CreateStreamAsSelect node, C context) { + return visitStatement(node, context); + } + + protected R visitCreateTable(CreateTable node, C context) { + return visitStatement(node, context); + } + + protected R visitCreateTableAsSelect(CreateTableAsSelect node, C context) { + return visitStatement(node, context); + } + + protected R visitDropTopic(DropTopic node, C context) { + return visitStatement(node, context); + } + + protected R visitDropStream(DropStream node, C context) { + return visitStatement(node, context); + } + + protected R visitDropTable(DropTable node, C context) { + return visitStatement(node, context); + } + + protected R visitRenameTable(RenameTable node, C context) { + return visitStatement(node, context); + } + + protected R visitRenameColumn(RenameColumn node, C context) { + return visitStatement(node, context); + } + + protected R visitCreateView(CreateView node, C context) { + return visitStatement(node, context); + } + + protected R visitDropView(DropView node, C context) { + return visitStatement(node, context); + } + + protected R visitDelete(Delete node, C context) { + return visitStatement(node, context); + } + + protected R visitGroupBy(GroupBy node, C context) { + return visitNode(node, context); + } + + protected R visitGroupingElement(GroupingElement node, C context) { + return visitNode(node, context); + } + + protected R visitGroupingSets(GroupingSets node, C context) { + return visitGroupingElement(node, context); + } + + protected R visitSimpleGroupBy(SimpleGroupBy node, C context) { + return visitGroupingElement(node, context); + } + + protected R visitSymbolReference(SymbolReference node, C context) { + return visitExpression(node, context); + } +} diff --git 
a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BetweenPredicate.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BetweenPredicate.java new file mode 100644 index 000000000000..100502e8e5bf --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BetweenPredicate.java @@ -0,0 +1,76 @@ +/** + * Copyright 2017 Confluent Inc. + * + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class BetweenPredicate + extends Expression { + + private final Expression value; + private final Expression min; + private final Expression max; + + public BetweenPredicate(Expression value, Expression min, Expression max) { + this(Optional.empty(), value, min, max); + } + + public BetweenPredicate(NodeLocation location, Expression value, Expression min, Expression max) { + this(Optional.of(location), value, min, max); + } + + private BetweenPredicate(Optional location, Expression value, Expression min, + Expression max) { + super(location); + requireNonNull(value, "value is null"); + requireNonNull(min, "min is null"); + requireNonNull(max, "max is null"); + + this.value = value; + this.min = min; + this.max = max; + } + + public Expression getValue() { + return value; + } + + public Expression getMin() { + return min; + } + + public Expression getMax() { + return max; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitBetweenPredicate(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BetweenPredicate that = (BetweenPredicate) o; + return Objects.equals(value, that.value) + && Objects.equals(min, that.min) + && Objects.equals(max, that.max); + } + + @Override + public int hashCode() { + return Objects.hash(value, min, max); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BinaryLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BinaryLiteral.java new file mode 100644 index 000000000000..fe7326ab8446 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BinaryLiteral.java @@ -0,0 +1,84 @@ +/** + * Copyright 2017 Confluent Inc. 
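+ *
+ * <p>Binary literal given as hexadecimal text. Whitespace is stripped and
+ * the digits are decoded into a {@code Slice}; a non-hex character or an
+ * odd digit count is rejected with a {@code ParsingException}.</p>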
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.io.BaseEncoding;
+import io.airlift.slice.Slice;
+import io.airlift.slice.Slices;
+import io.confluent.ksql.parser.ParsingException;
+
+import java.util.Objects;
+import java.util.Optional;
+import java.util.regex.Pattern;
+
+import static java.util.Objects.requireNonNull;
+
+public class BinaryLiteral
+    extends Literal {
+
+  // the grammar could possibly include whitespace in the value it passes to us
+  private static final Pattern WHITESPACE_PATTERN = Pattern.compile("[ \\r\\n\\t]");
+  private static final Pattern NOT_HEX_DIGIT_PATTERN = Pattern.compile(".*[^A-F0-9].*");
+
+  private final Slice value;
+
+  public BinaryLiteral(String value) {
+    this(Optional.empty(), value);
+  }
+
+  public BinaryLiteral(Optional<NodeLocation> location, String value) {
+    super(location);
+    requireNonNull(value, "value is null");
+    String hexString = WHITESPACE_PATTERN.matcher(value).replaceAll("");
+    // note: the error paths below assume a parser-supplied location is present
+    if (NOT_HEX_DIGIT_PATTERN.matcher(hexString).matches()) {
+      throw new ParsingException("Binary literal can only contain hexadecimal digits",
+                                 location.get());
+    }
+    if (hexString.length() % 2 != 0) {
+      throw new ParsingException("Binary literal must contain an even number of digits",
+                                 location.get());
+    }
+    this.value = Slices.wrappedBuffer(BaseEncoding.base16().decode(hexString));
+  }
+
+  public BinaryLiteral(NodeLocation location, String value) {
+    this(Optional.of(location), value);
+  }
+
+  /**
+   * Return the value as a hex-formatted string with upper-case characters
+   */
+  public String toHexString() {
+    return BaseEncoding.base16().encode(value.getBytes());
+  }
+
+  public Slice getValue() {
+    return value;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitBinaryLiteral(this, context);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    BinaryLiteral that = (BinaryLiteral) o;
+    return Objects.equals(value, that.value);
+  }
+
+  @Override
+  public int hashCode() {
+    return value.hashCode();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BooleanLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BooleanLiteral.java
new file mode 100644
index 000000000000..cf0174543e07
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/BooleanLiteral.java
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2017 Confluent Inc.
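+ *
+ * <p>Boolean literal; only the case-insensitive strings {@code "true"} and
+ * {@code "false"} are accepted, and {@code TRUE_LITERAL} /
+ * {@code FALSE_LITERAL} are shared constants.</p>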
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.base.Preconditions; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Locale.ENGLISH; +import static java.util.Objects.requireNonNull; + +public class BooleanLiteral + extends Literal { + + public static final BooleanLiteral TRUE_LITERAL = new BooleanLiteral(Optional.empty(), "true"); + public static final BooleanLiteral FALSE_LITERAL = new BooleanLiteral(Optional.empty(), "false"); + + private final boolean value; + + public BooleanLiteral(String value) { + this(Optional.empty(), value); + } + + public BooleanLiteral(NodeLocation location, String value) { + this(Optional.of(location), value); + } + + private BooleanLiteral(Optional location, String value) { + super(location); + requireNonNull(value, "value is null"); + Preconditions.checkArgument( + value.toLowerCase(ENGLISH).equals("true") || value.toLowerCase(ENGLISH).equals("false")); + + this.value = value.toLowerCase(ENGLISH).equals("true"); + } + + public boolean getValue() { + return value; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitBooleanLiteral(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + BooleanLiteral other = (BooleanLiteral) obj; + return Objects.equals(this.value, other.value); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Cast.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Cast.java new file mode 100644 index 000000000000..eb8ecd7f9b9f --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Cast.java @@ -0,0 +1,98 @@ +/** + * Copyright 2017 Confluent Inc. 
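+ *
+ * <p>CAST of an expression to a named target type; the {@code safe} and
+ * {@code typeOnly} flags record which cast variant the parser produced.</p>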
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public final class Cast + extends Expression { + + private final Expression expression; + private final String type; + private final boolean safe; + private final boolean typeOnly; + + public Cast(Expression expression, String type) { + this(Optional.empty(), expression, type, false, false); + } + + public Cast(Expression expression, String type, boolean safe) { + this(Optional.empty(), expression, type, safe, false); + } + + public Cast(Expression expression, String type, boolean safe, boolean typeOnly) { + this(Optional.empty(), expression, type, safe, typeOnly); + } + + public Cast(NodeLocation location, Expression expression, String type) { + this(Optional.of(location), expression, type, false, false); + } + + public Cast(NodeLocation location, Expression expression, String type, boolean safe) { + this(Optional.of(location), expression, type, safe, false); + } + + public Cast(NodeLocation location, Expression expression, String type, boolean safe, + boolean typeOnly) { + this(Optional.of(location), expression, type, safe, typeOnly); + } + + private Cast(Optional location, Expression expression, String type, boolean safe, + boolean typeOnly) { + super(location); + requireNonNull(expression, "expression is null"); + requireNonNull(type, "type is null"); + + this.expression = expression; + this.type = type; + this.safe = safe; + this.typeOnly = typeOnly; + } + + public Expression getExpression() { + return expression; + } + + public String getType() { + return type; + } + + public boolean isSafe() { + return safe; + } + + public boolean isTypeOnly() { + return typeOnly; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitCast(this, context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Cast o = (Cast) obj; + return Objects.equals(this.expression, o.expression) + && Objects.equals(this.type, o.type) + && Objects.equals(this.safe, o.safe) + && Objects.equals(this.typeOnly, o.typeOnly); + } + + @Override + public int hashCode() { + return Objects.hash(expression, type, safe, typeOnly); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ComparisonExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ComparisonExpression.java new file mode 100644 index 000000000000..6708a712f5e1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ComparisonExpression.java @@ -0,0 +1,136 @@ +/** + * Copyright 2017 Confluent Inc. 
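+ *
+ * <p>Binary comparison; {@code Type} pairs each operator with its SQL
+ * symbol and offers {@code flip()} (swap operand order) and {@code negate()}
+ * (logical complement), e.g. {@code GREATER_THAN.flip() == LESS_THAN} and
+ * {@code EQUAL.negate() == NOT_EQUAL}.</p>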
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class ComparisonExpression + extends Expression { + + public enum Type { + EQUAL("="), + NOT_EQUAL("<>"), + LESS_THAN("<"), + LESS_THAN_OR_EQUAL("<="), + GREATER_THAN(">"), + GREATER_THAN_OR_EQUAL(">="), + IS_DISTINCT_FROM("IS DISTINCT FROM"); + + private final String value; + + Type(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public Type flip() { + switch (this) { + case EQUAL: + return EQUAL; + case NOT_EQUAL: + return NOT_EQUAL; + case LESS_THAN: + return GREATER_THAN; + case LESS_THAN_OR_EQUAL: + return GREATER_THAN_OR_EQUAL; + case GREATER_THAN: + return LESS_THAN; + case GREATER_THAN_OR_EQUAL: + return LESS_THAN_OR_EQUAL; + case IS_DISTINCT_FROM: + return IS_DISTINCT_FROM; + default: + throw new IllegalArgumentException("Unsupported comparison: " + this); + } + } + + public Type negate() { + switch (this) { + case EQUAL: + return NOT_EQUAL; + case NOT_EQUAL: + return EQUAL; + case LESS_THAN: + return GREATER_THAN_OR_EQUAL; + case LESS_THAN_OR_EQUAL: + return GREATER_THAN; + case GREATER_THAN: + return LESS_THAN_OR_EQUAL; + case GREATER_THAN_OR_EQUAL: + return LESS_THAN; + default: + throw new IllegalArgumentException("Unsupported comparison: " + this); + } + } + } + + private final Type type; + private final Expression left; + private final Expression right; + + public ComparisonExpression(Type type, Expression left, Expression right) { + this(Optional.empty(), type, left, right); + } + + public ComparisonExpression(NodeLocation location, Type type, Expression left, Expression right) { + this(Optional.of(location), type, left, right); + } + + private ComparisonExpression(Optional location, Type type, Expression left, + Expression right) { + super(location); + requireNonNull(type, "type is null"); + requireNonNull(left, "left is null"); + requireNonNull(right, "right is null"); + + this.type = type; + this.left = left; + this.right = right; + } + + public Type getType() { + return type; + } + + public Expression getLeft() { + return left; + } + + public Expression getRight() { + return right; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitComparisonExpression(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ComparisonExpression that = (ComparisonExpression) o; + return (type == that.type) + && Objects.equals(left, that.left) + && Objects.equals(right, that.right); + } + + @Override + public int hashCode() { + return Objects.hash(type, left, right); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateStream.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateStream.java new file mode 100644 index 000000000000..69db6373984c --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateStream.java @@ -0,0 +1,95 @@ +/** + * Copyright 2017 Confluent Inc. 
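+ *
+ * <p>Parse node for CREATE STREAM with an explicit column list;
+ * {@code notExists} reflects an IF NOT EXISTS clause and {@code properties}
+ * holds the WITH-clause entries. For example (illustrative column and
+ * property names):</p>
+ *
+ * <pre>{@code
+ * CREATE STREAM pageviews (viewtime BIGINT, userid VARCHAR)
+ *   WITH (kafka_topic = 'pageviews', value_format = 'JSON');
+ * }</pre>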
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class CreateStream extends AbstractStreamCreateStatement { + + private final QualifiedName name; + private final List elements; + private final boolean notExists; + private final Map properties; + + public CreateStream(QualifiedName name, List elements, boolean notExists, + Map properties) { + this(Optional.empty(), name, elements, notExists, properties); + } + + public CreateStream(NodeLocation location, QualifiedName name, List elements, + boolean notExists, Map properties) { + this(Optional.of(location), name, elements, notExists, properties); + } + + private CreateStream(Optional location, QualifiedName name, + List elements, boolean notExists, + Map properties) { + super(location); + this.name = requireNonNull(name, "stream is null"); + this.elements = ImmutableList.copyOf(requireNonNull(elements, "elements is null")); + this.notExists = notExists; + this.properties = ImmutableMap.copyOf(requireNonNull(properties, "properties is null")); + } + + public QualifiedName getName() { + return name; + } + + public List getElements() { + return elements; + } + + public boolean isNotExists() { + return notExists; + } + + public Map getProperties() { + return properties; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitCreateStream(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(name, elements, notExists, properties); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + CreateStream o = (CreateStream) obj; + return Objects.equals(name, o.name) + && Objects.equals(elements, o.elements) + && Objects.equals(notExists, o.notExists) + && Objects.equals(properties, o.properties); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("name", name) + .add("elements", elements) + .add("notExists", notExists) + .add("properties", properties) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateStreamAsSelect.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateStreamAsSelect.java new file mode 100644 index 000000000000..ce7ecfab6ce1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateStreamAsSelect.java @@ -0,0 +1,101 @@ +/** + * Copyright 2017 Confluent Inc. 
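+ *
+ * <p>Parse node for CREATE STREAM ... AS SELECT;
+ * {@code partitionByColumn} captures the optional PARTITION BY column.</p>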
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class CreateStreamAsSelect extends Statement {
+
+  private final QualifiedName name;
+  private final Query query;
+  private final boolean notExists;
+  private final Map<String, Expression> properties;
+  private final Optional<Expression> partitionByColumn;
+
+  public CreateStreamAsSelect(QualifiedName name, Query query, boolean notExists,
+                              Map<String, Expression> properties,
+                              Optional<Expression> partitionByColumn) {
+    this(Optional.empty(), name, query, notExists, properties, partitionByColumn);
+  }
+
+  public CreateStreamAsSelect(NodeLocation location, QualifiedName name, Query query,
+                              boolean notExists, Map<String, Expression> properties,
+                              Optional<Expression> partitionByColumn) {
+    this(Optional.of(location), name, query, notExists, properties, partitionByColumn);
+  }
+
+  private CreateStreamAsSelect(Optional<NodeLocation> location, QualifiedName name,
+                               Query query, boolean notExists,
+                               Map<String, Expression> properties,
+                               Optional<Expression> partitionByColumn) {
+    super(location);
+    this.name = requireNonNull(name, "stream is null");
+    this.query = query;
+    this.notExists = notExists;
+    this.properties = ImmutableMap.copyOf(
+        requireNonNull(properties, "properties is null"));
+    this.partitionByColumn = partitionByColumn;
+  }
+
+  public QualifiedName getName() {
+    return name;
+  }
+
+  public boolean isNotExists() {
+    return notExists;
+  }
+
+  public Query getQuery() {
+    return query;
+  }
+
+  public Map<String, Expression> getProperties() {
+    return properties;
+  }
+
+  public Optional<Expression> getPartitionByColumn() {
+    return partitionByColumn;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitCreateStreamAsSelect(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    // include partitionByColumn so statements differing only in PARTITION BY hash apart
+    return Objects.hash(name, query, notExists, properties, partitionByColumn);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    CreateStreamAsSelect o = (CreateStreamAsSelect) obj;
+    return Objects.equals(name, o.name)
+        && Objects.equals(query, o.query)
+        && Objects.equals(notExists, o.notExists)
+        && Objects.equals(properties, o.properties)
+        && Objects.equals(partitionByColumn, o.partitionByColumn);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("name", name)
+        .add("query", query)
+        .add("notExists", notExists)
+        .add("properties", properties)
+        .add("partitionByColumn", partitionByColumn)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateTable.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateTable.java
new file mode 100644
index 000000000000..e1555891538b
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateTable.java
@@ -0,0 +1,97 @@
+/**
+ * Copyright 2017 Confluent Inc.
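+ *
+ * <p>Parse node for CREATE TABLE with an explicit column list; structurally
+ * the table-side twin of {@code CreateStream}.</p>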
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class CreateTable + extends AbstractStreamCreateStatement { + + private final QualifiedName name; + private final List elements; + private final boolean notExists; + private final Map properties; + + public CreateTable(QualifiedName name, List elements, boolean notExists, + Map properties) { + this(Optional.empty(), name, elements, notExists, properties); + } + + public CreateTable(NodeLocation location, QualifiedName name, List elements, + boolean notExists, Map properties) { + this(Optional.of(location), name, elements, notExists, properties); + } + + private CreateTable(Optional location, QualifiedName name, + List elements, boolean notExists, + Map properties) { + super(location); + this.name = requireNonNull(name, "table is null"); + this.elements = ImmutableList.copyOf(requireNonNull(elements, "elements is null")); + this.notExists = notExists; + this.properties = ImmutableMap.copyOf(requireNonNull(properties, "properties is null")); + } + + public QualifiedName getName() { + return name; + } + + public List getElements() { + return elements; + } + + public boolean isNotExists() { + return notExists; + } + + public Map getProperties() { + return properties; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitCreateTable(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(name, elements, notExists, properties); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + CreateTable o = (CreateTable) obj; + return Objects.equals(name, o.name) + && Objects.equals(elements, o.elements) + && Objects.equals(notExists, o.notExists) + && Objects.equals(properties, o.properties); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("name", name) + .add("elements", elements) + .add("notExists", notExists) + .add("properties", properties) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateTableAsSelect.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateTableAsSelect.java new file mode 100644 index 000000000000..b849b8cde70d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateTableAsSelect.java @@ -0,0 +1,99 @@ +/** + * Copyright 2017 Confluent Inc. 
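+ *
+ * <p>Parse node for CREATE TABLE ... AS SELECT; the WITH-clause entries are
+ * kept in {@code properties}.</p>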
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableMap; + +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class CreateTableAsSelect + extends Statement { + + private final QualifiedName name; + private final Query query; + private final boolean notExists; + private final Map properties; + + public CreateTableAsSelect(QualifiedName name, Query query, boolean notExists, + Map properties) { + this(Optional.empty(), name, query, notExists, properties); + } + + public CreateTableAsSelect(NodeLocation location, QualifiedName name, Query query, + boolean notExists, + Map properties + ) { + this(Optional.of(location), name, query, notExists, properties); + } + + private CreateTableAsSelect(Optional location, QualifiedName name, Query query, + boolean notExists, + Map properties + ) { + super(location); + this.name = requireNonNull(name, "name is null"); + this.query = requireNonNull(query, "query is null"); + this.notExists = notExists; + this.properties = ImmutableMap + .copyOf(requireNonNull(properties, "properties is null")); + } + + public QualifiedName getName() { + return name; + } + + public Query getQuery() { + return query; + } + + public boolean isNotExists() { + return notExists; + } + + public Map getProperties() { + return properties; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitCreateTableAsSelect(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(name, query, properties); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + CreateTableAsSelect o = (CreateTableAsSelect) obj; + return Objects.equals(name, o.name) + && Objects.equals(query, o.query) + && Objects.equals(notExists, o.notExists) + && Objects.equals(properties, o.properties); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("name", name) + .add("query", query) + .add("notExists", notExists) + .add("properties", properties) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateView.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateView.java new file mode 100644 index 000000000000..b5010318322c --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/CreateView.java @@ -0,0 +1,81 @@ +/** + * Copyright 2017 Confluent Inc. 
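+ *
+ * <p>Parse node for CREATE [OR REPLACE] VIEW name AS query.</p>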
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class CreateView + extends Statement { + + private final QualifiedName name; + private final Query query; + private final boolean replace; + + public CreateView(QualifiedName name, Query query, boolean replace) { + this(Optional.empty(), name, query, replace); + } + + public CreateView(NodeLocation location, QualifiedName name, Query query, boolean replace) { + this(Optional.of(location), name, query, replace); + } + + private CreateView(Optional location, QualifiedName name, Query query, + boolean replace) { + super(location); + this.name = requireNonNull(name, "name is null"); + this.query = requireNonNull(query, "query is null"); + this.replace = replace; + } + + public QualifiedName getName() { + return name; + } + + public Query getQuery() { + return query; + } + + public boolean isReplace() { + return replace; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitCreateView(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(name, query, replace); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + CreateView o = (CreateView) obj; + return Objects.equals(name, o.name) + && Objects.equals(query, o.query) + && Objects.equals(replace, o.replace); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("name", name) + .add("query", query) + .add("replace", replace) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DecimalLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DecimalLiteral.java new file mode 100644 index 000000000000..1970d4d751d1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DecimalLiteral.java @@ -0,0 +1,56 @@ +/** + * Copyright 2017 Confluent Inc. 
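+ *
+ * <p>Decimal literal kept verbatim as its source string; no numeric
+ * conversion happens at parse time.</p>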
+ * + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class DecimalLiteral + extends Literal { + + private final String value; + + public DecimalLiteral(String value) { + this(Optional.empty(), value); + } + + public DecimalLiteral(NodeLocation location, String value) { + this(Optional.of(location), value); + } + + public DecimalLiteral(Optional location, String value) { + super(location); + this.value = requireNonNull(value, "value is null"); + } + + public String getValue() { + return value; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitDecimalLiteral(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DecimalLiteral that = (DecimalLiteral) o; + return Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(value); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultAstVisitor.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultAstVisitor.java new file mode 100644 index 000000000000..ca2501374c30 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultAstVisitor.java @@ -0,0 +1,393 @@ +/** + * Copyright 2017 Confluent Inc. + * + **/ + +package io.confluent.ksql.parser.tree; + +import javax.annotation.Nullable; + +public abstract class DefaultAstVisitor + extends AstVisitor { + + public R process(Node node, @Nullable C context) { + return node.accept(this, context); + } + + protected R visitNode(Node node, C context) { + return null; + } + + protected R visitExpression(Expression node, C context) { + return visitNode(node, context); + } + + protected R visitExtract(Extract node, C context) { + return visitExpression(node, context); + } + + protected R visitArithmeticBinary(ArithmeticBinaryExpression node, C context) { + process(node.getLeft(), context); + process(node.getRight(), context); + return visitExpression(node, context); + } + + protected R visitBetweenPredicate(BetweenPredicate node, C context) { + return visitExpression(node, context); + } + + protected R visitComparisonExpression(ComparisonExpression node, C context) { + process(node.getLeft(), context); + process(node.getRight(), context); + return visitExpression(node, context); + } + + protected R visitLiteral(Literal node, C context) { + return visitExpression(node, context); + } + + protected R visitDoubleLiteral(DoubleLiteral node, C context) { + return null; + } + + protected R visitDecimalLiteral(DecimalLiteral node, C context) { + return null; + } + + protected R visitStatements(Statements node, C context) { + return visitNode(node, context); + } + + protected R visitStatement(Statement node, C context) { + return visitNode(node, context); + } + + protected R visitQuery(Query node, C context) { + return visitStatement(node, context); + } + + protected R visitExplain(Explain node, C context) { + return visitStatement(node, context); + } + + protected R visitShowSchemas(ShowSchemas node, C context) { + return visitStatement(node, context); + } + + protected R visitShowCatalogs(ShowCatalogs node, C context) { + return visitStatement(node, context); + } + + protected R visitShowColumns(ShowColumns node, C context) { + return visitStatement(node, context); + } + + protected R visitShowPartitions(ShowPartitions node, C context) 
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultAstVisitor.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultAstVisitor.java
new file mode 100644
index 000000000000..ca2501374c30
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultAstVisitor.java
@@ -0,0 +1,393 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import javax.annotation.Nullable;
+
+public abstract class DefaultAstVisitor<R, C>
+    extends AstVisitor<R, C> {
+
+  public R process(Node node, @Nullable C context) {
+    return node.accept(this, context);
+  }
+
+  protected R visitNode(Node node, C context) {
+    return null;
+  }
+
+  protected R visitExpression(Expression node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitExtract(Extract node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitArithmeticBinary(ArithmeticBinaryExpression node, C context) {
+    process(node.getLeft(), context);
+    process(node.getRight(), context);
+    return visitExpression(node, context);
+  }
+
+  protected R visitBetweenPredicate(BetweenPredicate node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitComparisonExpression(ComparisonExpression node, C context) {
+    process(node.getLeft(), context);
+    process(node.getRight(), context);
+    return visitExpression(node, context);
+  }
+
+  protected R visitLiteral(Literal node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitDoubleLiteral(DoubleLiteral node, C context) {
+    return null;
+  }
+
+  protected R visitDecimalLiteral(DecimalLiteral node, C context) {
+    return null;
+  }
+
+  protected R visitStatements(Statements node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitStatement(Statement node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitQuery(Query node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitExplain(Explain node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitShowSchemas(ShowSchemas node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitShowCatalogs(ShowCatalogs node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitShowColumns(ShowColumns node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitShowPartitions(ShowPartitions node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitShowCreate(ShowCreate node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitShowFunctions(ShowFunctions node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitShowSession(ShowSession node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitSetSession(SetSession node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitGenericLiteral(GenericLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitTimeLiteral(TimeLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitExplainOption(ExplainOption node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitWith(With node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitWithQuery(WithQuery node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitSelect(Select node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitRelation(Relation node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitQueryBody(QueryBody node, C context) {
+    return visitRelation(node, context);
+  }
+
+  protected R visitQuerySpecification(QuerySpecification node, C context) {
+    return visitQueryBody(node, context);
+  }
+
+  protected R visitSetOperation(SetOperation node, C context) {
+    return visitQueryBody(node, context);
+  }
+
+  protected R visitUnion(Union node, C context) {
+    return visitSetOperation(node, context);
+  }
+
+  protected R visitIntersect(Intersect node, C context) {
+    return visitSetOperation(node, context);
+  }
+
+  protected R visitExcept(Except node, C context) {
+    return visitSetOperation(node, context);
+  }
+
+  protected R visitTimestampLiteral(TimestampLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitWhenClause(WhenClause node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitIntervalLiteral(IntervalLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitInPredicate(InPredicate node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitFunctionCall(FunctionCall node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitLambdaExpression(LambdaExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitSimpleCaseExpression(SimpleCaseExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitStringLiteral(StringLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitBinaryLiteral(BinaryLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitBooleanLiteral(BooleanLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitInListExpression(InListExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitQualifiedNameReference(QualifiedNameReference node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitDereferenceExpression(DereferenceExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitNullIfExpression(NullIfExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitNullLiteral(NullLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitArithmeticUnary(ArithmeticUnaryExpression node, C context) {
+    process(node.getValue(), context);
+    return visitExpression(node, context);
+  }
+
+  protected R visitNotExpression(NotExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitSelectItem(SelectItem node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitSingleColumn(SingleColumn node, C context) {
+    process(node.getExpression(), context);
+    return visitSelectItem(node, context);
+  }
+
+  protected R visitAllColumns(AllColumns node, C context) {
+    return visitSelectItem(node, context);
+  }
+
+  protected R visitSearchedCaseExpression(SearchedCaseExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitLikePredicate(LikePredicate node, C context) {
+    process(node.getValue(), context);
+    return visitExpression(node, context);
+  }
+
+  protected R visitIsNotNullPredicate(IsNotNullPredicate node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitIsNullPredicate(IsNullPredicate node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitSubscriptExpression(SubscriptExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitLongLiteral(LongLiteral node, C context) {
+    return visitLiteral(node, context);
+  }
+
+  protected R visitLogicalBinaryExpression(LogicalBinaryExpression node, C context) {
+    process(node.getLeft(), context);
+    process(node.getRight(), context);
+    return visitExpression(node, context);
+  }
+
+  protected R visitSubqueryExpression(SubqueryExpression node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitSortItem(SortItem node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitTable(Table node, C context) {
+    return visitQueryBody(node, context);
+  }
+
+  protected R visitValues(Values node, C context) {
+    return visitQueryBody(node, context);
+  }
+
+  protected R visitRow(Row node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitTableSubquery(TableSubquery node, C context) {
+    return visitQueryBody(node, context);
+  }
+
+  protected R visitAliasedRelation(AliasedRelation node, C context) {
+    return visitRelation(node, context);
+  }
+
+  protected R visitSampledRelation(SampledRelation node, C context) {
+    return visitRelation(node, context);
+  }
+
+  protected R visitJoin(Join node, C context) {
+    return visitRelation(node, context);
+  }
+
+  protected R visitExists(ExistsPredicate node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitCast(Cast node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitFieldReference(FieldReference node, C context) {
+    return visitExpression(node, context);
+  }
+
+  protected R visitWindow(Window node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitWindowFrame(WindowFrame node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitFrameBound(FrameBound node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitTableElement(TableElement node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitCreateStream(CreateStream node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitCreateStreamAsSelect(CreateStreamAsSelect node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitCreateTable(CreateTable node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitCreateTableAsSelect(CreateTableAsSelect node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitDropTable(DropTable node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitRenameTable(RenameTable node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitRenameColumn(RenameColumn node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitCreateView(CreateView node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitDropView(DropView node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitDelete(Delete node, C context) {
+    return visitStatement(node, context);
+  }
+
+  protected R visitGroupBy(GroupBy node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitGroupingElement(GroupingElement node, C context) {
+    return visitNode(node, context);
+  }
+
+  protected R visitGroupingSets(GroupingSets node, C context) {
+    return visitGroupingElement(node, context);
+  }
+
+  protected R visitSimpleGroupBy(SimpleGroupBy node, C context) {
+    return visitGroupingElement(node, context);
+  }
+
+  protected R visitSymbolReference(SymbolReference node, C context) {
+    return visitExpression(node, context);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultExpressionTraversalVisitor.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultExpressionTraversalVisitor.java
new file mode 100644
index 000000000000..17fdbca40b18
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultExpressionTraversalVisitor.java
@@ -0,0 +1,19 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+/**
+ * When walking Expressions, don't traverse into SubqueryExpressions
+ */
+public abstract class DefaultExpressionTraversalVisitor<R, C>
+    extends DefaultTraversalVisitor<R, C> {
+
+  @Override
+  protected R visitSubqueryExpression(SubqueryExpression node, C context) {
+    // Don't traverse into Subqueries within an Expression
+    return null;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultTraversalVisitor.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultTraversalVisitor.java
new file mode 100644
index 000000000000..41e82f74c2f6
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DefaultTraversalVisitor.java
@@ -0,0 +1,374 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Set;
+
+public abstract class DefaultTraversalVisitor<R, C>
+    extends AstVisitor<R, C> {
+
+  @Override
+  protected R visitExtract(Extract node, C context) {
+    return process(node.getExpression(), context);
+  }
+
+  @Override
+  protected R visitCast(Cast node, C context) {
+    return process(node.getExpression(), context);
+  }
+
+  @Override
+  protected R visitArithmeticBinary(ArithmeticBinaryExpression node, C context) {
+    process(node.getLeft(), context);
+    process(node.getRight(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitBetweenPredicate(BetweenPredicate node, C context) {
+    process(node.getValue(), context);
+    process(node.getMin(), context);
+    process(node.getMax(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitSubscriptExpression(SubscriptExpression node, C context) {
+    process(node.getBase(), context);
+    process(node.getIndex(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitComparisonExpression(ComparisonExpression node, C context) {
+    process(node.getLeft(), context);
+    process(node.getRight(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitQuery(Query node, C context) {
+    if (node.getWith().isPresent()) {
+      process(node.getWith().get(), context);
+    }
+    process(node.getQueryBody(), context);
+    for (SortItem sortItem : node.getOrderBy()) {
+      process(sortItem, context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitWith(With node, C context) {
+    for (WithQuery query : node.getQueries()) {
+      process(query, context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitWithQuery(WithQuery node, C context) {
+    return process(node.getQuery(), context);
+  }
+
+  @Override
+  protected R visitSelect(Select node, C context) {
+    for (SelectItem item : node.getSelectItems()) {
+      process(item, context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitSingleColumn(SingleColumn node, C context) {
+    process(node.getExpression(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitWhenClause(WhenClause node, C context) {
+    process(node.getOperand(), context);
+    process(node.getResult(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitInPredicate(InPredicate node, C context) {
+    process(node.getValue(), context);
+    process(node.getValueList(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitFunctionCall(FunctionCall node, C context) {
+    for (Expression argument : node.getArguments()) {
+      process(argument, context);
+    }
+
+    if (node.getWindow().isPresent()) {
+      process(node.getWindow().get(), context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitDereferenceExpression(DereferenceExpression node, C context) {
+    process(node.getBase(), context);
+    return null;
+  }
+
+  @Override
+  public R visitWindow(Window node, C context) {
+    process(node.getWindowExpression(), context);
+    return null;
+  }
+
+  @Override
+  public R visitWindowFrame(WindowFrame node, C context) {
+    process(node.getStart(), context);
+    if (node.getEnd().isPresent()) {
+      process(node.getEnd().get(), context);
+    }
+
+    return null;
+  }
+
+  @Override
+  public R visitFrameBound(FrameBound node, C context) {
+    if (node.getValue().isPresent()) {
+      process(node.getValue().get(), context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitSimpleCaseExpression(SimpleCaseExpression node, C context) {
+    process(node.getOperand(), context);
+    for (WhenClause clause : node.getWhenClauses()) {
+      process(clause, context);
+    }
+
+    node.getDefaultValue()
+        .ifPresent(value -> process(value, context));
+
+    return null;
+  }
+
+  @Override
+  protected R visitInListExpression(InListExpression node, C context) {
+    for (Expression value : node.getValues()) {
+      process(value, context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitNullIfExpression(NullIfExpression node, C context) {
+    process(node.getFirst(), context);
+    process(node.getSecond(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitArithmeticUnary(ArithmeticUnaryExpression node, C context) {
+    return process(node.getValue(), context);
+  }
+
+  @Override
+  protected R visitNotExpression(NotExpression node, C context) {
+    return process(node.getValue(), context);
+  }
+
+  @Override
+  protected R visitSearchedCaseExpression(SearchedCaseExpression node, C context) {
+    for (WhenClause clause : node.getWhenClauses()) {
+      process(clause, context);
+    }
+    node.getDefaultValue()
+        .ifPresent(value -> process(value, context));
+
+    return null;
+  }
+
+  @Override
+  protected R visitLikePredicate(LikePredicate node, C context) {
+    process(node.getValue(), context);
+    process(node.getPattern(), context);
+    if (node.getEscape() != null) {
+      process(node.getEscape(), context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitIsNotNullPredicate(IsNotNullPredicate node, C context) {
+    return process(node.getValue(), context);
+  }
+
+  @Override
+  protected R visitIsNullPredicate(IsNullPredicate node, C context) {
+    return process(node.getValue(), context);
+  }
+
+  @Override
+  protected R visitLogicalBinaryExpression(LogicalBinaryExpression node, C context) {
+    process(node.getLeft(), context);
+    process(node.getRight(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitSubqueryExpression(SubqueryExpression node, C context) {
+    return process(node.getQuery(), context);
+  }
+
+  @Override
+  protected R visitSortItem(SortItem node, C context) {
+    return process(node.getSortKey(), context);
+  }
+
+  @Override
+  protected R visitQuerySpecification(QuerySpecification node, C context) {
+    process(node.getSelect(), context);
+    if (node.getFrom().isPresent()) {
+      process(node.getFrom().get(), context);
+    }
+    if (node.getWhere().isPresent()) {
+      process(node.getWhere().get(), context);
+    }
+    if (node.getGroupBy().isPresent()) {
+      process(node.getGroupBy().get(), context);
+    }
+    if (node.getHaving().isPresent()) {
+      process(node.getHaving().get(), context);
+    }
+    for (SortItem sortItem : node.getOrderBy()) {
+      process(sortItem, context);
+    }
+    return null;
+  }
+
+  @Override
+  protected R visitSetOperation(SetOperation node, C context) {
+    for (Relation relation : node.getRelations()) {
+      process(relation, context);
+    }
+    return null;
+  }
+
+  @Override
+  protected R visitValues(Values node, C context) {
+    for (Expression row : node.getRows()) {
+      process(row, context);
+    }
+    return null;
+  }
+
+  @Override
+  protected R visitRow(Row node, C context) {
+    for (Expression expression : node.getItems()) {
+      process(expression, context);
+    }
+    return null;
+  }
+
+  @Override
+  protected R visitTableSubquery(TableSubquery node, C context) {
+    return process(node.getQuery(), context);
+  }
+
+  @Override
+  protected R visitAliasedRelation(AliasedRelation node, C context) {
+    return process(node.getRelation(), context);
+  }
+
+  @Override
+  protected R visitSampledRelation(SampledRelation node, C context) {
+    process(node.getRelation(), context);
+    process(node.getSamplePercentage(), context);
+    if (node.getColumnsToStratifyOn().isPresent()) {
+      for (Expression expression : node.getColumnsToStratifyOn().get()) {
+        process(expression, context);
+      }
+    }
+    return null;
+  }
+
+  @Override
+  protected R visitJoin(Join node, C context) {
+    process(node.getLeft(), context);
+    process(node.getRight(), context);
+
+    node.getCriteria()
+        .filter(criteria -> criteria instanceof JoinOn)
+        .ifPresent(criteria -> process(((JoinOn) criteria).getExpression(), context));
+
+    return null;
+  }
+
+  @Override
+  protected R visitGroupBy(GroupBy node, C context) {
+    for (GroupingElement groupingElement : node.getGroupingElements()) {
+      process(groupingElement, context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitGroupingElement(GroupingElement node, C context) {
+    for (Set<Expression> expressions : node.enumerateGroupingSets()) {
+      for (Expression expression : expressions) {
+        process(expression, context);
+      }
+    }
+    return null;
+  }
+
+  @Override
+  protected R visitSimpleGroupBy(SimpleGroupBy node, C context) {
+    visitGroupingElement(node, context);
+
+    for (Expression expression : node.getColumnExpressions()) {
+      process(expression, context);
+    }
+
+    return null;
+  }
+
+  @Override
+  protected R visitDelete(Delete node, C context) {
+    process(node.getTable(), context);
+    node.getWhere().ifPresent(where -> process(where, context));
+
+    return null;
+  }
+
+  @Override
+  protected R visitCreateTableAsSelect(CreateTableAsSelect node, C context) {
+    process(node.getQuery(), context);
+    node.getProperties().values().forEach(expression -> process(expression, context));
+
+    return null;
+  }
+}
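As the javadoc on DefaultExpressionTraversalVisitor notes, it walks an expression tree but stops at subquery boundaries, which makes it a natural base for harvesting leaf references. A minimal sketch (the ColumnCollector class and someExpression are hypothetical, and it assumes QualifiedNameReference.getName() returns the node's QualifiedName):

    import java.util.HashSet;
    import java.util.Set;

    // Hypothetical sketch, not part of this patch: collect every column
    // reference in an expression, without descending into subqueries.
    class ColumnCollector extends DefaultExpressionTraversalVisitor<Void, Set<QualifiedName>> {

      @Override
      protected Void visitQualifiedNameReference(QualifiedNameReference node,
                                                 Set<QualifiedName> collected) {
        collected.add(node.getName());
        return null;
      }
    }

    // Usage: someExpression is any parsed Expression.
    Set<QualifiedName> columns = new HashSet<>();
    new ColumnCollector().process(someExpression, columns);
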
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Delete.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Delete.java
new file mode 100644
index 000000000000..504c6124df36
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Delete.java
@@ -0,0 +1,72 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class Delete
+    extends Statement {
+
+  private final Table table;
+  private final Optional<Expression> where;
+
+  public Delete(Table table, Optional<Expression> where) {
+    this(Optional.empty(), table, where);
+  }
+
+  public Delete(NodeLocation location, Table table, Optional<Expression> where) {
+    this(Optional.of(location), table, where);
+  }
+
+  private Delete(Optional<NodeLocation> location, Table table, Optional<Expression> where) {
+    super(location);
+    this.table = requireNonNull(table, "table is null");
+    this.where = requireNonNull(where, "where is null");
+  }
+
+  public Table getTable() {
+    return table;
+  }
+
+  public Optional<Expression> getWhere() {
+    return where;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitDelete(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(table, where);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    Delete o = (Delete) obj;
+    return Objects.equals(table, o.table)
+        && Objects.equals(where, o.where);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("table", table.getName())
+        .add("where", where)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DereferenceExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DereferenceExpression.java
new file mode 100644
index 000000000000..b5c64aab9f03
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DereferenceExpression.java
@@ -0,0 +1,93 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+public class DereferenceExpression
+    extends Expression {
+
+  private final Expression base;
+  private final String fieldName;
+
+  public DereferenceExpression(Expression base, String fieldName) {
+    this(Optional.empty(), base, fieldName);
+  }
+
+  public DereferenceExpression(NodeLocation location, Expression base, String fieldName) {
+    this(Optional.of(location), base, fieldName);
+  }
+
+  private DereferenceExpression(Optional<NodeLocation> location, Expression base,
+                                String fieldName) {
+    super(location);
+    checkArgument(base != null, "base is null");
+    checkArgument(fieldName != null, "fieldName is null");
+    this.base = base;
+    this.fieldName = fieldName;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitDereferenceExpression(this, context);
+  }
+
+  public Expression getBase() {
+    return base;
+  }
+
+  public String getFieldName() {
+    return fieldName;
+  }
+
+  /**
+   * If this DereferenceExpression looks like a QualifiedName, return QualifiedName.
+   * Otherwise return null
+   */
+  public static QualifiedName getQualifiedName(DereferenceExpression expression) {
+    List<String> parts = tryParseParts(expression.base, expression.fieldName);
+    return parts == null ? null : QualifiedName.of(parts);
+  }
+
+  private static List<String> tryParseParts(Expression base, String fieldName) {
+    if (base instanceof QualifiedNameReference) {
+      List<String> newList = new ArrayList<>(((QualifiedNameReference) base).getName().getParts());
+      newList.add(fieldName);
+      return newList;
+    } else if (base instanceof DereferenceExpression) {
+      QualifiedName baseQualifiedName = getQualifiedName((DereferenceExpression) base);
+      if (baseQualifiedName != null) {
+        List<String> newList = new ArrayList<>(baseQualifiedName.getParts());
+        newList.add(fieldName);
+        return newList;
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    DereferenceExpression that = (DereferenceExpression) o;
+    return Objects.equals(base, that.base)
+        && Objects.equals(fieldName, that.fieldName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(base, fieldName);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DoubleLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DoubleLiteral.java
new file mode 100644
index 000000000000..6ec92dcb4758
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DoubleLiteral.java
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+public class DoubleLiteral
+    extends Literal {
+
+  private final double value;
+
+  public DoubleLiteral(String value) {
+    this(Optional.empty(), value);
+  }
+
+  public DoubleLiteral(NodeLocation location, String value) {
+    this(Optional.of(location), value);
+  }
+
+  private DoubleLiteral(Optional<NodeLocation> location, String value) {
+    super(location);
+    requireNonNull(value, "value is null");
+    this.value = Double.parseDouble(value);
+  }
+
+  public double getValue() {
+    return value;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitDoubleLiteral(this, context);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    DoubleLiteral that = (DoubleLiteral) o;
+
+    if (Double.compare(that.value, value) != 0) {
+      return false;
+    }
+
+    return true;
+  }
+
+  @SuppressWarnings("UnaryPlus")
+  @Override
+  public int hashCode() {
+    long temp = value != +0.0d ? Double.doubleToLongBits(value) : 0L;
+    return (int) (temp ^ (temp >>> 32));
+  }
+}
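To make getQualifiedName concrete: a dotted name such as a.b.c parses into nested DereferenceExpressions, and this helper collapses them back into one QualifiedName, or yields null when the base is not itself a name. A hypothetical sketch, assuming the QualifiedName.of(...) factory and the single-argument QualifiedNameReference constructor defined elsewhere in this patch:

    // Hypothetical sketch, not part of this patch: the expression "a.b.c".
    Expression base = new QualifiedNameReference(QualifiedName.of("a"));
    DereferenceExpression abc =
        new DereferenceExpression(new DereferenceExpression(base, "b"), "c");

    QualifiedName name = DereferenceExpression.getQualifiedName(abc);
    // name.getParts() is ["a", "b", "c"]; for a non-name base such as
    // "f(x).b", getQualifiedName returns null instead.
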
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropStream.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropStream.java
new file mode 100644
index 000000000000..06ec415ab454
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropStream.java
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class DropStream
+    extends AbstractStreamDropStatement {
+
+  private final QualifiedName streamName;
+  private final boolean exists;
+
+  public DropStream(QualifiedName streamName, boolean exists) {
+    this(Optional.empty(), streamName, exists);
+  }
+
+  public DropStream(NodeLocation location, QualifiedName streamName, boolean exists) {
+    this(Optional.of(location), streamName, exists);
+  }
+
+  private DropStream(Optional<NodeLocation> location, QualifiedName streamName, boolean exists) {
+    super(location);
+    this.streamName = streamName;
+    this.exists = exists;
+  }
+
+  public QualifiedName getName() {
+    return streamName;
+  }
+
+  public boolean isExists() {
+    return exists;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitDropStream(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(streamName, exists);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    DropStream o = (DropStream) obj;
+    return Objects.equals(streamName, o.streamName)
+        && (exists == o.exists);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("streamName", streamName)
+        .add("exists", exists)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropTable.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropTable.java
new file mode 100644
index 000000000000..66a5afdf756f
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropTable.java
@@ -0,0 +1,69 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class DropTable extends AbstractStreamDropStatement {
+
+  private final QualifiedName tableName;
+  private final boolean exists;
+
+  public DropTable(QualifiedName tableName, boolean exists) {
+    this(Optional.empty(), tableName, exists);
+  }
+
+  public DropTable(NodeLocation location, QualifiedName tableName, boolean exists) {
+    this(Optional.of(location), tableName, exists);
+  }
+
+  private DropTable(Optional<NodeLocation> location, QualifiedName tableName, boolean exists) {
+    super(location);
+    this.tableName = tableName;
+    this.exists = exists;
+  }
+
+  public QualifiedName getName() {
+    return tableName;
+  }
+
+  public boolean isExists() {
+    return exists;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitDropTable(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(tableName, exists);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    DropTable o = (DropTable) obj;
+    return Objects.equals(tableName, o.tableName)
+        && (exists == o.exists);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("tableName", tableName)
+        .add("exists", exists)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropTopic.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropTopic.java
new file mode 100644
index 000000000000..89ebbd254de5
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropTopic.java
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class DropTopic
+    extends Statement {
+
+  private final QualifiedName topicName;
+  private final boolean exists;
+
+  public DropTopic(QualifiedName topicName, boolean exists) {
+    this(Optional.empty(), topicName, exists);
+  }
+
+  public DropTopic(NodeLocation location, QualifiedName topicName, boolean exists) {
+    this(Optional.of(location), topicName, exists);
+  }
+
+  private DropTopic(Optional<NodeLocation> location, QualifiedName topicName, boolean exists) {
+    super(location);
+    this.topicName = topicName;
+    this.exists = exists;
+  }
+
+  public QualifiedName getTopicName() {
+    return topicName;
+  }
+
+  public boolean isExists() {
+    return exists;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitDropTopic(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(topicName, exists);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    DropTopic o = (DropTopic) obj;
+    return Objects.equals(topicName, o.topicName)
+        && (exists == o.exists);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("topicName", topicName)
+        .add("exists", exists)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropView.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropView.java
new file mode 100644
index 000000000000..2c02e5adb405
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/DropView.java
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class DropView
+    extends Statement {
+
+  private final QualifiedName name;
+  private final boolean exists;
+
+  public DropView(QualifiedName name, boolean exists) {
+    this(Optional.empty(), name, exists);
+  }
+
+  public DropView(NodeLocation location, QualifiedName name, boolean exists) {
+    this(Optional.of(location), name, exists);
+  }
+
+  private DropView(Optional<NodeLocation> location, QualifiedName name, boolean exists) {
+    super(location);
+    this.name = name;
+    this.exists = exists;
+  }
+
+  public QualifiedName getName() {
+    return name;
+  }
+
+  public boolean isExists() {
+    return exists;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitDropView(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(name, exists);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    DropView o = (DropView) obj;
+    return Objects.equals(name, o.name)
+        && (exists == o.exists);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("name", name)
+        .add("exists", exists)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/EmptyStatement.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/EmptyStatement.java
new file mode 100644
index 000000000000..eb0e30b9761c
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/EmptyStatement.java
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class EmptyStatement extends Statement {
+
+  public EmptyStatement() {
+    this(Optional.empty());
+  }
+
+  public EmptyStatement(NodeLocation location) {
+    this(Optional.of(location));
+  }
+
+  private EmptyStatement(Optional<NodeLocation> location) {
+    super(location);
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    // Dispatch to the generic statement hook rather than returning null,
+    // so visitors still see this node.
+    return visitor.visitStatement(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    // All EmptyStatements are equal, so they must share one constant hash code.
+    // (Objects.hash(this) would recurse back into this method.)
+    return EmptyStatement.class.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    return obj instanceof EmptyStatement;
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this).toString();
+  }
+}
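Because every EmptyStatement equals every other, hashCode must be a shared constant, which the class-based constant above provides; hashing the instance itself would call hashCode recursively and overflow the stack. A hypothetical check of the contract:

    // Hypothetical check, not part of this patch: equal objects, equal hashes.
    EmptyStatement a = new EmptyStatement();
    EmptyStatement b = new EmptyStatement();

    assert a.equals(b);
    assert a.hashCode() == b.hashCode();  // holds for a shared constant hash
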
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Except.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Except.java
new file mode 100644
index 000000000000..9fe6ad0aa50c
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Except.java
@@ -0,0 +1,84 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.collect.ImmutableList;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class Except
+    extends SetOperation {
+
+  private final Relation left;
+  private final Relation right;
+
+  public Except(Relation left, Relation right, boolean distinct) {
+    this(Optional.empty(), left, right, distinct);
+  }
+
+  public Except(NodeLocation location, Relation left, Relation right, boolean distinct) {
+    this(Optional.of(location), left, right, distinct);
+  }
+
+  private Except(Optional<NodeLocation> location, Relation left, Relation right,
+                 boolean distinct) {
+    super(location, distinct);
+    requireNonNull(left, "left is null");
+    requireNonNull(right, "right is null");
+
+    this.left = left;
+    this.right = right;
+  }
+
+  public Relation getLeft() {
+    return left;
+  }
+
+  public Relation getRight() {
+    return right;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitExcept(this, context);
+  }
+
+  @Override
+  public List<Relation> getRelations() {
+    return ImmutableList.of(left, right);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("left", left)
+        .add("right", right)
+        .add("distinct", isDistinct())
+        .toString();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    Except o = (Except) obj;
+    return Objects.equals(left, o.left)
+        && Objects.equals(right, o.right)
+        && Objects.equals(isDistinct(), o.isDistinct());
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(left, right, isDistinct());
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExistsPredicate.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExistsPredicate.java
new file mode 100644
index 000000000000..72f894d6846e
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExistsPredicate.java
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+public class ExistsPredicate
+    extends Expression {
+
+  private final Query subquery;
+
+  public ExistsPredicate(Query subquery) {
+    this(Optional.empty(), subquery);
+  }
+
+  public ExistsPredicate(NodeLocation location, Query subquery) {
+    this(Optional.of(location), subquery);
+  }
+
+  private ExistsPredicate(Optional<NodeLocation> location, Query subquery) {
+    super(location);
+    requireNonNull(subquery, "subquery is null");
+    this.subquery = subquery;
+  }
+
+  public Query getSubquery() {
+    return subquery;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitExists(this, context);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    ExistsPredicate that = (ExistsPredicate) o;
+    return Objects.equals(subquery, that.subquery);
+  }
+
+  @Override
+  public int hashCode() {
+    return subquery.hashCode();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Explain.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Explain.java
new file mode 100644
index 000000000000..425f3af07868
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Explain.java
@@ -0,0 +1,88 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.collect.ImmutableList;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class Explain
+    extends Statement {
+
+  private final Statement statement;
+  private final boolean analyze;
+  private final List<ExplainOption> options;
+
+  public Explain(Statement statement, boolean analyze, List<ExplainOption> options) {
+    this(Optional.empty(), analyze, statement, options);
+  }
+
+  public Explain(NodeLocation location, boolean analyze, Statement statement,
+                 List<ExplainOption> options) {
+    this(Optional.of(location), analyze, statement, options);
+  }
+
+  private Explain(Optional<NodeLocation> location, boolean analyze, Statement statement,
+                  List<ExplainOption> options) {
+    super(location);
+    this.statement = requireNonNull(statement, "statement is null");
+    this.analyze = analyze;
+    if (options == null) {
+      this.options = ImmutableList.of();
+    } else {
+      this.options = ImmutableList.copyOf(options);
+    }
+  }
+
+  public Statement getStatement() {
+    return statement;
+  }
+
+  public boolean isAnalyze() {
+    return analyze;
+  }
+
+  public List<ExplainOption> getOptions() {
+    return options;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitExplain(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(statement, options, analyze);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    Explain o = (Explain) obj;
+    return Objects.equals(statement, o.statement)
+        && Objects.equals(options, o.options)
+        && Objects.equals(analyze, o.analyze);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("statement", statement)
+        .add("options", options)
+        .add("analyze", analyze)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainFormat.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainFormat.java
new file mode 100644
index 000000000000..5cffa758beae
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainFormat.java
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class ExplainFormat
+    extends ExplainOption {
+
+  public enum Type {
+    TEXT,
+    GRAPHVIZ
+  }
+
+  private final Type type;
+
+  public ExplainFormat(Type type) {
+    this(Optional.empty(), type);
+  }
+
+  public ExplainFormat(NodeLocation location, Type type) {
+    this(Optional.of(location), type);
+  }
+
+  private ExplainFormat(Optional<NodeLocation> location, Type type) {
+    super(location);
+    this.type = requireNonNull(type, "type is null");
+  }
+
+  public Type getType() {
+    return type;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(type);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    ExplainFormat o = (ExplainFormat) obj;
+    return Objects.equals(type, o.type);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("type", type)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainOption.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainOption.java
new file mode 100644
index 000000000000..12ed371e0356
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainOption.java
@@ -0,0 +1,20 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Optional;
+
+public abstract class ExplainOption
+    extends Node {
+
+  protected ExplainOption(Optional<NodeLocation> location) {
+    super(location);
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitExplainOption(this, context);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainType.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainType.java
new file mode 100644
index 000000000000..b6dd02ef1380
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExplainType.java
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class ExplainType
+    extends ExplainOption {
+
+  public enum Type {
+    LOGICAL,
+    DISTRIBUTED
+  }
+
+  private final Type type;
+
+  public ExplainType(Type type) {
+    this(Optional.empty(), type);
+  }
+
+  public ExplainType(NodeLocation location, Type type) {
+    this(Optional.of(location), type);
+  }
+
+  private ExplainType(Optional<NodeLocation> location, Type type) {
+    super(location);
+    this.type = requireNonNull(type, "type is null");
+  }
+
+  public Type getType() {
+    return type;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(type);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    ExplainType o = (ExplainType) obj;
+    return Objects.equals(type, o.type);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("type", type)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExportCatalog.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExportCatalog.java
new file mode 100644
index 000000000000..ebb21be05a31
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExportCatalog.java
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.base.MoreObjects;
+
+import java.util.Objects;
+import java.util.Optional;
+
+public class ExportCatalog extends Statement {
+
+  private final String catalogFilePath;
+
+  public ExportCatalog(Optional<NodeLocation> location, String catalogFilePath) {
+    super(location);
+    // Strip the surrounding single quotes that the parser leaves on string literals.
+    if (catalogFilePath.startsWith("'") && catalogFilePath.endsWith("'")) {
+      this.catalogFilePath = catalogFilePath.substring(1, catalogFilePath.length() - 1);
+    } else {
+      this.catalogFilePath = catalogFilePath;
+    }
+  }
+
+  public String getCatalogFilePath() {
+    return catalogFilePath;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(catalogFilePath);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    ExportCatalog o = (ExportCatalog) obj;
+    return Objects.equals(catalogFilePath, o.catalogFilePath);
+  }
+
+  @Override
+  public String toString() {
+    return MoreObjects.toStringHelper(this)
+        .add("catalogFilePath", catalogFilePath)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Expression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Expression.java
new file mode 100644
index 000000000000..ad182d1d7e1e
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Expression.java
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import io.confluent.ksql.parser.CodegenExpressionFormatter;
+import io.confluent.ksql.parser.ExpressionFormatter;
+import io.confluent.ksql.util.KsqlException;
+
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.Optional;
+
+public abstract class Expression
+    extends Node {
+
+  protected Expression(Optional<NodeLocation> location) {
+    super(location);
+  }
+
+  /**
+   * Accessible for {@link AstVisitor}, use {@link AstVisitor#process(Node, Object)} instead.
+   */
+  @Override
+  protected <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitExpression(this, context);
+  }
+
+  @Override
+  public final String toString() {
+    return ExpressionFormatter.formatExpression(this);
+  }
+
+  public final String getCodegenString(Schema schema) {
+    try {
+      return CodegenExpressionFormatter.formatExpression(this, schema);
+    } catch (Exception e) {
+      throw new KsqlException(e.getMessage());
+    }
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExpressionRewriter.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExpressionRewriter.java
new file mode 100644
index 000000000000..d046122da297
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ExpressionRewriter.java
@@ -0,0 +1,151 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+public class ExpressionRewriter<C> {
+
+  public Expression rewriteExpression(Expression node, C context,
+                                      ExpressionTreeRewriter<C> treeRewriter) {
+    return null;
+  }
+
+  public Expression rewriteRow(Row node, C context, ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteArithmeticUnary(ArithmeticUnaryExpression node, C context,
+                                           ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteArithmeticBinary(ArithmeticBinaryExpression node, C context,
+                                            ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteComparisonExpression(ComparisonExpression node, C context,
+                                                ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteBetweenPredicate(BetweenPredicate node, C context,
+                                            ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteLogicalBinaryExpression(LogicalBinaryExpression node, C context,
+                                                   ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteNotExpression(NotExpression node, C context,
+                                         ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteIsNullPredicate(IsNullPredicate node, C context,
+                                           ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteIsNotNullPredicate(IsNotNullPredicate node, C context,
+                                              ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteNullIfExpression(NullIfExpression node, C context,
+                                            ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteSearchedCaseExpression(SearchedCaseExpression node, C context,
+                                                  ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteSimpleCaseExpression(SimpleCaseExpression node, C context,
+                                                ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteWhenClause(WhenClause node, C context,
+                                      ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteInListExpression(InListExpression node, C context,
+                                            ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteFunctionCall(FunctionCall node, C context,
+                                        ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteLambdaExpression(LambdaExpression node, C context,
+                                            ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteLikePredicate(LikePredicate node, C context,
+                                         ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteInPredicate(InPredicate node, C context,
+                                       ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteExists(ExistsPredicate node, C context,
+                                  ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteSubqueryExpression(SubqueryExpression node, C context,
+                                              ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteLiteral(Literal node, C context,
+                                   ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteSubscriptExpression(SubscriptExpression node, C context,
+                                               ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteQualifiedNameReference(QualifiedNameReference node, C context,
+                                                  ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteDereferenceExpression(DereferenceExpression node, C context,
+                                                 ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteExtract(Extract node, C context,
+                                   ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteCast(Cast node, C context, ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteFieldReference(FieldReference node, C context,
+                                          ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+
+  public Expression rewriteSymbolReference(SymbolReference node, C context,
+                                           ExpressionTreeRewriter<C> treeRewriter) {
+    return rewriteExpression(node, context, treeRewriter);
+  }
+}
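ExpressionRewriter is the extension point for the ExpressionTreeRewriter below: each rewriteXxx hook may return a replacement node, or null to fall through to rewriteExpression and from there to the walker's default behavior. A minimal sketch (the TrimStringsRewriter class and someExpression are hypothetical, and it assumes the StringLiteral node type, with its String constructor and getValue(), from elsewhere in this patch):

    // Hypothetical sketch, not part of this patch: trim every string literal.
    // Returning null from a hook means "no replacement here".
    class TrimStringsRewriter extends ExpressionRewriter<Void> {

      @Override
      public Expression rewriteLiteral(Literal node, Void context,
                                       ExpressionTreeRewriter<Void> treeRewriter) {
        if (node instanceof StringLiteral) {
          return new StringLiteral(((StringLiteral) node).getValue().trim());
        }
        return null;
      }
    }

    // Usage: someExpression is any parsed Expression.
    Expression rewritten =
        ExpressionTreeRewriter.rewriteWith(new TrimStringsRewriter(), someExpression);
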
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; + +import java.util.Iterator; +import java.util.Optional; + +public final class ExpressionTreeRewriter { + + private final ExpressionRewriter rewriter; + private final AstVisitor> visitor; + + public static T rewriteWith(ExpressionRewriter rewriter, T node) { + return new ExpressionTreeRewriter<>(rewriter).rewrite(node, null); + } + + public static T rewriteWith(ExpressionRewriter rewriter, T node, + C context) { + return new ExpressionTreeRewriter<>(rewriter).rewrite(node, context); + } + + public ExpressionTreeRewriter(ExpressionRewriter rewriter) { + this.rewriter = rewriter; + this.visitor = new RewritingVisitor(); + } + + @SuppressWarnings("unchecked") + public T rewrite(T node, C context) { + return (T) visitor.process(node, new Context<>(context, false)); + } + + /** + * Invoke the default rewrite logic explicitly. Specifically, it skips the invocation of the + * expression rewriter for the provided node. + */ + @SuppressWarnings("unchecked") + public T defaultRewrite(T node, C context) { + return (T) visitor.process(node, new Context<>(context, true)); + } + + private class RewritingVisitor + extends AstVisitor> { + + @Override + protected Expression visitExpression(Expression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteExpression(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + throw new UnsupportedOperationException( + "not yet implemented: " + getClass().getSimpleName() + " for " + node.getClass() + .getName()); + } + + @Override + protected Expression visitRow(Row node, Context context) { + if (!context.isDefaultRewrite()) { + Expression result = rewriter.rewriteRow(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + ImmutableList.Builder builder = ImmutableList.builder(); + for (Expression expression : node.getItems()) { + builder.add(rewrite(expression, context.get())); + } + + if (!sameElements(node.getItems(), builder.build())) { + return new Row(builder.build()); + } + + return node; + } + + @Override + protected Expression visitArithmeticUnary(ArithmeticUnaryExpression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteArithmeticUnary(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression child = rewrite(node.getValue(), context.get()); + if (child != node.getValue()) { + return new ArithmeticUnaryExpression(node.getSign(), child); + } + + return node; + } + + @Override + public Expression visitArithmeticBinary(ArithmeticBinaryExpression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteArithmeticBinary(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression left = rewrite(node.getLeft(), context.get()); + Expression right = rewrite(node.getRight(), context.get()); + + if (left != node.getLeft() || right != node.getRight()) { + return new ArithmeticBinaryExpression(node.getType(), left, right); + } + + return node; + } + + @Override + protected Expression visitSubscriptExpression(SubscriptExpression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteSubscriptExpression(node, context.get(), 
ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression base = rewrite(node.getBase(), context.get()); + Expression index = rewrite(node.getIndex(), context.get()); + + if (base != node.getBase() || index != node.getIndex()) { + return new SubscriptExpression(base, index); + } + + return node; + } + + @Override + public Expression visitComparisonExpression(ComparisonExpression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteComparisonExpression(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression left = rewrite(node.getLeft(), context.get()); + Expression right = rewrite(node.getRight(), context.get()); + + if (left != node.getLeft() || right != node.getRight()) { + return new ComparisonExpression(node.getType(), left, right); + } + + return node; + } + + @Override + protected Expression visitBetweenPredicate(BetweenPredicate node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteBetweenPredicate(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression value = rewrite(node.getValue(), context.get()); + Expression min = rewrite(node.getMin(), context.get()); + Expression max = rewrite(node.getMax(), context.get()); + + if (value != node.getValue() || min != node.getMin() || max != node.getMax()) { + return new BetweenPredicate(value, min, max); + } + + return node; + } + + @Override + public Expression visitLogicalBinaryExpression(LogicalBinaryExpression node, + Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter + .rewriteLogicalBinaryExpression(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression left = rewrite(node.getLeft(), context.get()); + Expression right = rewrite(node.getRight(), context.get()); + + if (left != node.getLeft() || right != node.getRight()) { + return new LogicalBinaryExpression(node.getType(), left, right); + } + + return node; + } + + @Override + public Expression visitNotExpression(NotExpression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteNotExpression(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression value = rewrite(node.getValue(), context.get()); + + if (value != node.getValue()) { + return new NotExpression(value); + } + + return node; + } + + @Override + protected Expression visitIsNullPredicate(IsNullPredicate node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteIsNullPredicate(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression value = rewrite(node.getValue(), context.get()); + + if (value != node.getValue()) { + return new IsNullPredicate(value); + } + + return node; + } + + @Override + protected Expression visitIsNotNullPredicate(IsNotNullPredicate node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteIsNotNullPredicate(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression value = rewrite(node.getValue(), context.get()); + + if (value != node.getValue()) { + return new IsNotNullPredicate(value); + } + + return node; + } + + @Override + protected 
Expression visitNullIfExpression(NullIfExpression node, Context<C> context) {
+      if (!context.isDefaultRewrite()) {
+        Expression result =
+            rewriter.rewriteNullIfExpression(node, context.get(), ExpressionTreeRewriter.this);
+        if (result != null) {
+          return result;
+        }
+      }
+
+      Expression first = rewrite(node.getFirst(), context.get());
+      Expression second = rewrite(node.getSecond(), context.get());
+
+      if (first != node.getFirst() || second != node.getSecond()) {
+        return new NullIfExpression(first, second);
+      }
+
+      return node;
+    }
+
+    @Override
+    protected Expression visitSearchedCaseExpression(SearchedCaseExpression node,
+                                                     Context<C> context) {
+      if (!context.isDefaultRewrite()) {
+        Expression result =
+            rewriter
+                .rewriteSearchedCaseExpression(node, context.get(), ExpressionTreeRewriter.this);
+        if (result != null) {
+          return result;
+        }
+      }
+
+      ImmutableList.Builder<WhenClause> builder = ImmutableList.builder();
+      for (WhenClause expression : node.getWhenClauses()) {
+        builder.add(rewrite(expression, context.get()));
+      }
+
+      Optional<Expression> defaultValue = node.getDefaultValue()
+          .map(value -> rewrite(value, context.get()));
+
+      if (!sameElements(node.getDefaultValue(), defaultValue) || !sameElements(
+          node.getWhenClauses(), builder.build())) {
+        return new SearchedCaseExpression(builder.build(), defaultValue);
+      }
+
+      return node;
+    }
+
+    @Override
+    protected Expression visitSimpleCaseExpression(SimpleCaseExpression node,
+                                                   Context<C> context) {
+      if (!context.isDefaultRewrite()) {
+        Expression result =
+            rewriter.rewriteSimpleCaseExpression(node, context.get(), ExpressionTreeRewriter.this);
+        if (result != null) {
+          return result;
+        }
+      }
+
+      Expression operand = rewrite(node.getOperand(), context.get());
+
+      ImmutableList.Builder<WhenClause> builder = ImmutableList.builder();
+      for (WhenClause expression : node.getWhenClauses()) {
+        builder.add(rewrite(expression, context.get()));
+      }
+
+      Optional<Expression> defaultValue = node.getDefaultValue()
+          .map(value -> rewrite(value, context.get()));
+
+      if (operand != node.getOperand()
+          || !sameElements(node.getDefaultValue(), defaultValue)
+          || !sameElements(node.getWhenClauses(), builder.build())) {
+        return new SimpleCaseExpression(operand, builder.build(), defaultValue);
+      }
+
+      return node;
+    }
+
+    @Override
+    protected Expression visitWhenClause(WhenClause node, Context<C> context) {
+      if (!context.isDefaultRewrite()) {
+        Expression result =
+            rewriter.rewriteWhenClause(node, context.get(), ExpressionTreeRewriter.this);
+        if (result != null) {
+          return result;
+        }
+      }
+
+      Expression operand = rewrite(node.getOperand(), context.get());
+      Expression result = rewrite(node.getResult(), context.get());
+
+      if (operand != node.getOperand() || result != node.getResult()) {
+        return new WhenClause(operand, result);
+      }
+      return node;
+    }
+
+    @Override
+    public Expression visitFunctionCall(FunctionCall node, Context<C> context) {
+      if (!context.isDefaultRewrite()) {
+        Expression result =
+            rewriter.rewriteFunctionCall(node, context.get(), ExpressionTreeRewriter.this);
+        if (result != null) {
+          return result;
+        }
+      }
+
+      // Rewrite each argument; the window, if present, is carried over as-is.
+      Optional<Window> rewrittenWindow = node.getWindow();
+
+      ImmutableList.Builder<Expression> arguments = ImmutableList.builder();
+      for (Expression expression : node.getArguments()) {
+        arguments.add(rewrite(expression, context.get()));
+      }
+
+      if (!sameElements(node.getArguments(), arguments.build())) {
+        return new FunctionCall(node.getName(), rewrittenWindow, node.isDistinct(),
+                                arguments.build());
+      }
+
+      return node;
+    }
+
+    @Override
+    protected Expression visitLambdaExpression(LambdaExpression node, Context<C> context) {
+      if (!context.isDefaultRewrite()) {
+        Expression result =
+            rewriter.rewriteLambdaExpression(node, context.get(), ExpressionTreeRewriter.this);
+        if (result != null) {
+          return result;
+        }
+      }
+
+      Expression body = rewrite(node.getBody(), context.get());
+      if (body != node.getBody()) {
+        return new
LambdaExpression(node.getArguments(), body); + } + + return node; + } + + @Override + public Expression visitLikePredicate(LikePredicate node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteLikePredicate(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression value = rewrite(node.getValue(), context.get()); + Expression pattern = rewrite(node.getPattern(), context.get()); + Expression escape = null; + if (node.getEscape() != null) { + escape = rewrite(node.getEscape(), context.get()); + } + + if (value != node.getValue() || pattern != node.getPattern() || escape != node.getEscape()) { + return new LikePredicate(value, pattern, escape); + } + + return node; + } + + @Override + public Expression visitInPredicate(InPredicate node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteInPredicate(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression value = rewrite(node.getValue(), context.get()); + Expression list = rewrite(node.getValueList(), context.get()); + + if (node.getValue() != value || node.getValueList() != list) { + return new InPredicate(value, list); + } + + return node; + } + + @Override + protected Expression visitInListExpression(InListExpression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteInListExpression(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + ImmutableList.Builder builder = ImmutableList.builder(); + for (Expression expression : node.getValues()) { + builder.add(rewrite(expression, context.get())); + } + + if (!sameElements(node.getValues(), builder.build())) { + return new InListExpression(builder.build()); + } + + return node; + } + + @Override + protected Expression visitExists(ExistsPredicate node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteExists(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + // No default rewrite for ExistsPredicate since we do not want to traverse subqueries + return node; + } + + @Override + public Expression visitSubqueryExpression(SubqueryExpression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteSubqueryExpression(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + // No default rewrite for SubqueryExpression since we do not want to traverse subqueries + return node; + } + + @Override + public Expression visitLiteral(Literal node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteLiteral(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + return node; + } + + @Override + public Expression visitQualifiedNameReference(QualifiedNameReference node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter + .rewriteQualifiedNameReference(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + return node; + } + + @Override + public Expression visitDereferenceExpression(DereferenceExpression node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteDereferenceExpression(node, 
context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression base = rewrite(node.getBase(), context.get()); + if (base != node.getBase()) { + return new DereferenceExpression(base, node.getFieldName()); + } + + return node; + } + + @Override + protected Expression visitExtract(Extract node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteExtract(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression expression = rewrite(node.getExpression(), context.get()); + + if (node.getExpression() != expression) { + return new Extract(expression, node.getField()); + } + + return node; + } + + @Override + public Expression visitCast(Cast node, Context context) { + if (!context.isDefaultRewrite()) { + Expression result = rewriter.rewriteCast(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + Expression expression = rewrite(node.getExpression(), context.get()); + + if (node.getExpression() != expression) { + return new Cast(expression, node.getType(), node.isSafe(), node.isTypeOnly()); + } + + return node; + } + + @Override + protected Expression visitFieldReference(FieldReference node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteFieldReference(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + return node; + } + + @Override + protected Expression visitSymbolReference(SymbolReference node, Context context) { + if (!context.isDefaultRewrite()) { + Expression + result = + rewriter.rewriteSymbolReference(node, context.get(), ExpressionTreeRewriter.this); + if (result != null) { + return result; + } + } + + return node; + } + } + + public static class Context { + + private final boolean defaultRewrite; + private final C context; + + private Context(C context, boolean defaultRewrite) { + this.context = context; + this.defaultRewrite = defaultRewrite; + } + + public C get() { + return context; + } + + public boolean isDefaultRewrite() { + return defaultRewrite; + } + } + + private static boolean sameElements(Optional a, Optional b) { + if (!a.isPresent() && !b.isPresent()) { + return true; + } else if (a.isPresent() != b.isPresent()) { + return false; + } + + return a.get() == b.get(); + } + + @SuppressWarnings("ObjectEquality") + private static boolean sameElements(Iterable a, Iterable b) { + if (Iterables.size(a) != Iterables.size(b)) { + return false; + } + + Iterator first = a.iterator(); + Iterator second = b.iterator(); + + while (first.hasNext() && second.hasNext()) { + if (first.next() != second.next()) { + return false; + } + } + + return true; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Extract.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Extract.java new file mode 100644 index 000000000000..1e32636a0331 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Extract.java @@ -0,0 +1,88 @@ +/** + * Copyright 2017 Confluent Inc. 
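+ *
+ * AST node for the SQL {@code EXTRACT(field FROM expression)} function, which pulls a
+ * date/time field such as YEAR or MINUTE out of an expression; the supported fields are
+ * enumerated in {@link Extract.Field} below.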
+ **/ + +package io.confluent.ksql.parser.tree; + +import javax.annotation.concurrent.Immutable; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +@Immutable +public class Extract + extends Expression { + + private final Expression expression; + private final Field field; + + public enum Field { + YEAR, + QUARTER, + MONTH, + WEEK, + DAY, + DAY_OF_MONTH, + DAY_OF_WEEK, + DOW, + DAY_OF_YEAR, + DOY, + YEAR_OF_WEEK, + YOW, + HOUR, + MINUTE, + SECOND, + TIMEZONE_MINUTE, + TIMEZONE_HOUR + } + + public Extract(Expression expression, Field field) { + this(Optional.empty(), expression, field); + } + + public Extract(NodeLocation location, Expression expression, Field field) { + this(Optional.of(location), expression, field); + } + + private Extract(Optional location, Expression expression, Field field) { + super(location); + requireNonNull(expression, "expression is null"); + requireNonNull(field, "field is null"); + + this.expression = expression; + this.field = field; + } + + public Expression getExpression() { + return expression; + } + + public Field getField() { + return field; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitExtract(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Extract that = (Extract) o; + return Objects.equals(expression, that.expression) + && (field == that.field); + } + + @Override + public int hashCode() { + return Objects.hash(expression, field); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FieldReference.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FieldReference.java new file mode 100644 index 000000000000..719922834e85 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FieldReference.java @@ -0,0 +1,50 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Optional; + +import static com.google.common.base.Preconditions.checkArgument; + +public class FieldReference + extends Expression { + + private final int fieldIndex; + + public FieldReference(int fieldIndex) { + super(Optional.empty()); + checkArgument(fieldIndex >= 0, "fieldIndex must be >= 0"); + + this.fieldIndex = fieldIndex; + } + + public int getFieldIndex() { + return fieldIndex; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitFieldReference(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FieldReference that = (FieldReference) o; + + return fieldIndex == that.fieldIndex; + } + + @Override + public int hashCode() { + return fieldIndex; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FrameBound.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FrameBound.java new file mode 100644 index 000000000000..b2b3c779f68e --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FrameBound.java @@ -0,0 +1,91 @@ +/** + * Copyright 2017 Confluent Inc. 
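+ *
+ * AST node for one endpoint of a window frame: UNBOUNDED PRECEDING, {@code n} PRECEDING,
+ * CURRENT ROW, {@code n} FOLLOWING, or UNBOUNDED FOLLOWING. The optional value expression
+ * holds the {@code n} of the bounded PRECEDING/FOLLOWING forms.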
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class FrameBound + extends Node { + + public enum Type { + UNBOUNDED_PRECEDING, + PRECEDING, + CURRENT_ROW, + FOLLOWING, + UNBOUNDED_FOLLOWING + } + + private final Type type; + private final Optional value; + + public FrameBound(Type type) { + this(Optional.empty(), type); + } + + public FrameBound(NodeLocation location, Type type) { + this(Optional.of(location), type); + } + + public FrameBound(Type type, Expression value) { + this(Optional.empty(), type, value); + } + + private FrameBound(Optional location, Type type) { + this(location, type, null); + } + + public FrameBound(NodeLocation location, Type type, Expression value) { + this(Optional.of(location), type, value); + } + + private FrameBound(Optional location, Type type, Expression value) { + super(location); + this.type = requireNonNull(type, "type is null"); + this.value = Optional.ofNullable(value); + } + + public Type getType() { + return type; + } + + public Optional getValue() { + return value; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitFrameBound(this, context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + FrameBound o = (FrameBound) obj; + return Objects.equals(type, o.type) + && Objects.equals(value, o.value); + } + + @Override + public int hashCode() { + return Objects.hash(type, value); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("type", type) + .add("value", value) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FunctionCall.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FunctionCall.java new file mode 100644 index 000000000000..8110e780e141 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/FunctionCall.java @@ -0,0 +1,101 @@ +/** + * Copyright 2017 Confluent Inc. 
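+ *
+ * AST node for a function invocation such as {@code COUNT(*)} or {@code LEN(NAME)},
+ * carrying the function name, an optional DISTINCT qualifier, an optional window, and the
+ * argument expressions. A minimal construction sketch (assuming a {@code QualifiedName.of}
+ * factory matching its use elsewhere in this parser):
+ *
+ * <pre>{@code
+ * // represents LEN(NAME)
+ * FunctionCall call = new FunctionCall(QualifiedName.of("LEN"),
+ *     ImmutableList.of(new QualifiedNameReference(QualifiedName.of("NAME"))));
+ * }</pre>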
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class FunctionCall + extends Expression { + + private final QualifiedName name; + private final Optional window; + private final boolean distinct; + private final List arguments; + + public FunctionCall(QualifiedName name, List arguments) { + this(Optional.empty(), name, Optional.empty(), false, arguments); + } + + public FunctionCall(NodeLocation location, QualifiedName name, List arguments) { + this(Optional.of(location), name, Optional.empty(), false, arguments); + } + + public FunctionCall(QualifiedName name, boolean distinct, List arguments) { + this(Optional.empty(), name, Optional.empty(), distinct, arguments); + } + + public FunctionCall(NodeLocation location, QualifiedName name, boolean distinct, + List arguments) { + this(Optional.of(location), name, Optional.empty(), distinct, arguments); + } + + public FunctionCall(QualifiedName name, Optional window, boolean distinct, + List arguments) { + this(Optional.empty(), name, window, distinct, arguments); + } + + public FunctionCall(NodeLocation location, QualifiedName name, Optional window, + boolean distinct, List arguments) { + this(Optional.of(location), name, window, distinct, arguments); + } + + private FunctionCall(Optional location, QualifiedName name, Optional window, + boolean distinct, List arguments) { + super(location); + requireNonNull(name, "name is null"); + requireNonNull(window, "window is null"); + requireNonNull(arguments, "arguments is null"); + + this.name = name; + this.window = window; + this.distinct = distinct; + this.arguments = arguments; + } + + public QualifiedName getName() { + return name; + } + + public Optional getWindow() { + return window; + } + + public boolean isDistinct() { + return distinct; + } + + public List getArguments() { + return arguments; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitFunctionCall(this, context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + FunctionCall o = (FunctionCall) obj; + return Objects.equals(name, o.name) + && Objects.equals(window, o.window) + && Objects.equals(distinct, o.distinct) + && Objects.equals(arguments, o.arguments); + } + + @Override + public int hashCode() { + return Objects.hash(name, distinct, window, arguments); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GenericLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GenericLiteral.java new file mode 100644 index 000000000000..ca8d30fb96eb --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GenericLiteral.java @@ -0,0 +1,75 @@ +/** + * Copyright 2017 Confluent Inc. 
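+ *
+ * AST node for a typed literal of the form {@code TYPE 'value'}, e.g.
+ * {@code DATE '2017-08-16'}. The type name "X" is rejected here because {@code X'...'}
+ * denotes a binary literal, which is handled by BinaryLiteral instead.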
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import io.confluent.ksql.parser.ParsingException;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+public final class GenericLiteral
+    extends Literal {
+
+  private final String type;
+  private final String value;
+
+  public GenericLiteral(String type, String value) {
+    this(Optional.empty(), type, value);
+  }
+
+  public GenericLiteral(NodeLocation location, String type, String value) {
+    this(Optional.of(location), type, value);
+  }
+
+  private GenericLiteral(Optional<NodeLocation> location, String type, String value) {
+    super(location);
+    requireNonNull(type, "type is null");
+    requireNonNull(value, "value is null");
+    if (type.equalsIgnoreCase("X")) {
+      // we explicitly disallow "X" as a type name; if the user arrived here, they must have
+      // intended a binary literal but added whitespace between the X and the opening quote
+      throw new ParsingException(
+          "Spaces are not allowed between 'X' and the starting quote of a binary literal",
+          location.get());
+    }
+    this.type = type;
+    this.value = value;
+  }
+
+  public String getType() {
+    return type;
+  }
+
+  public String getValue() {
+    return value;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitGenericLiteral(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(value, type);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+
+    GenericLiteral other = (GenericLiteral) obj;
+    return Objects.equals(this.value, other.value)
+        && Objects.equals(this.type, other.type);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupBy.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupBy.java
new file mode 100644
index 000000000000..4d8bb49199ec
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupBy.java
@@ -0,0 +1,76 @@
+/**
+ * Copyright 2017 Confluent Inc.
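+ *
+ * AST node for a GROUP BY clause: an ordered list of grouping elements plus a DISTINCT
+ * flag.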
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class GroupBy + extends Node { + + private final boolean isDistinct; + private final List groupingElements; + + public GroupBy(boolean isDistinct, List groupingElements) { + this(Optional.empty(), isDistinct, groupingElements); + } + + public GroupBy(NodeLocation location, boolean isDistinct, + List groupingElements) { + this(Optional.of(location), isDistinct, groupingElements); + } + + private GroupBy(Optional location, boolean isDistinct, + List groupingElements) { + super(location); + this.isDistinct = isDistinct; + this.groupingElements = ImmutableList.copyOf(requireNonNull(groupingElements)); + } + + public boolean isDistinct() { + return isDistinct; + } + + public List getGroupingElements() { + return groupingElements; + } + + @Override + protected R accept(AstVisitor visitor, C context) { + return visitor.visitGroupBy(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GroupBy groupBy = (GroupBy) o; + return isDistinct == groupBy.isDistinct + && Objects.equals(groupingElements, groupBy.groupingElements); + } + + @Override + public int hashCode() { + return Objects.hash(isDistinct, groupingElements); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("isDistinct", isDistinct) + .add("groupingElements", groupingElements) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupingElement.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupingElement.java new file mode 100644 index 000000000000..086eef6cddc7 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupingElement.java @@ -0,0 +1,24 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.List; +import java.util.Optional; +import java.util.Set; + +public abstract class GroupingElement + extends Node { + + public GroupingElement(Optional location) { + super(location); + } + + public abstract List> enumerateGroupingSets(); + + @Override + protected R accept(AstVisitor visitor, C context) { + return visitor.visitGroupingElement(this, context); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupingSets.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupingSets.java new file mode 100644 index 000000000000..89530311a2ef --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/GroupingSets.java @@ -0,0 +1,76 @@ +/** + * Copyright 2017 Confluent Inc. 
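+ *
+ * AST node for {@code GROUP BY GROUPING SETS ((a), (a, b))}; enumerateGroupingSets turns
+ * each set of qualified names into a set of QualifiedNameReference expressions.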
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.collectingAndThen; + +public class GroupingSets + extends GroupingElement { + + private final List> sets; + + public GroupingSets(List> groupingSetList) { + this(Optional.empty(), groupingSetList); + } + + public GroupingSets(NodeLocation location, List> sets) { + this(Optional.of(location), sets); + } + + private GroupingSets(Optional location, List> sets) { + super(location); + requireNonNull(sets); + checkArgument(!sets.isEmpty(), "grouping sets cannot be empty"); + this.sets = sets; + } + + @Override + public List> enumerateGroupingSets() { + return sets.stream() + .map(groupingSet -> groupingSet.stream() + .map(QualifiedNameReference::new) + .collect(Collectors.toSet())) + .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); + } + + @Override + protected R accept(AstVisitor visitor, C context) { + return visitor.visitGroupingSets(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GroupingSets groupingSets = (GroupingSets) o; + return Objects.equals(sets, groupingSets.sets); + } + + @Override + public int hashCode() { + return Objects.hash(sets); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("sets", sets) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/HoppingWindowExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/HoppingWindowExpression.java new file mode 100644 index 000000000000..3d201148dca9 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/HoppingWindowExpression.java @@ -0,0 +1,80 @@ +/** + * Copyright 2017 Confluent Inc. 
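+ *
+ * AST node for a KSQL hopping window such as
+ * {@code WINDOW HOPPING (SIZE 30 SECONDS, ADVANCE BY 10 SECONDS)}. A minimal construction
+ * sketch (assuming WindowExpression.WindowUnit declares a SECONDS constant):
+ *
+ * <pre>{@code
+ * KsqlWindowExpression window = new HoppingWindowExpression(
+ *     30, WindowExpression.WindowUnit.SECONDS,   // window size
+ *     10, WindowExpression.WindowUnit.SECONDS);  // advance interval
+ * }</pre>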
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +public class HoppingWindowExpression extends KsqlWindowExpression { + + private final long size; + private final WindowExpression.WindowUnit sizeUnit; + private final long advanceBy; + private final WindowExpression.WindowUnit advanceByUnit; + + public HoppingWindowExpression(long size, WindowExpression.WindowUnit sizeUnit, + long advanceBy, WindowExpression.WindowUnit advanceByUnit) { + this(Optional.empty(), "", size, sizeUnit, advanceBy, advanceByUnit); + } + + public HoppingWindowExpression(NodeLocation location, String windowName, long size, + WindowExpression.WindowUnit + sizeUnit, + long advanceBy, WindowExpression.WindowUnit advanceByUnit) { + this(Optional.of(location), windowName, size, sizeUnit, advanceBy, advanceByUnit); + } + + private HoppingWindowExpression(Optional location, String windowName, long size, + WindowExpression.WindowUnit sizeUnit, + long advanceBy, WindowExpression.WindowUnit advanceByUnit) { + super(location); + this.size = size; + this.sizeUnit = sizeUnit; + this.advanceBy = advanceBy; + this.advanceByUnit = advanceByUnit; + } + + public long getSize() { + return size; + } + + public WindowExpression.WindowUnit getSizeUnit() { + return sizeUnit; + } + + public long getAdvanceBy() { + return advanceBy; + } + + public WindowExpression.WindowUnit getAdvanceByUnit() { + return advanceByUnit; + } + + @Override + public String toString() { + return " HOPPING ( SIZE " + size + " " + sizeUnit + " , ADVANCE BY " + + advanceBy + " " + + "" + advanceByUnit + " ) "; + } + + @Override + public int hashCode() { + return Objects.hash(size, sizeUnit, advanceBy, advanceByUnit); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HoppingWindowExpression hoppingWindowExpression = (HoppingWindowExpression) o; + return hoppingWindowExpression.size == size && hoppingWindowExpression.sizeUnit == sizeUnit + && hoppingWindowExpression.advanceBy == advanceBy && hoppingWindowExpression + .advanceByUnit == advanceByUnit; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/InListExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/InListExpression.java new file mode 100644 index 000000000000..a97446e134d1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/InListExpression.java @@ -0,0 +1,55 @@ +/** + * Copyright 2017 Confluent Inc. 
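+ *
+ * AST node for the value list on the right-hand side of an IN predicate, e.g. the
+ * {@code ('a', 'b')} in {@code col IN ('a', 'b')}.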
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +public class InListExpression + extends Expression { + + private final List values; + + public InListExpression(List values) { + this(Optional.empty(), values); + } + + public InListExpression(NodeLocation location, List values) { + this(Optional.of(location), values); + } + + private InListExpression(Optional location, List values) { + super(location); + this.values = values; + } + + public List getValues() { + return values; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitInListExpression(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InListExpression that = (InListExpression) o; + return Objects.equals(values, that.values); + } + + @Override + public int hashCode() { + return values.hashCode(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/InPredicate.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/InPredicate.java new file mode 100644 index 000000000000..5669e807a594 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/InPredicate.java @@ -0,0 +1,61 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +public class InPredicate + extends Expression { + + private final Expression value; + private final Expression valueList; + + public InPredicate(Expression value, Expression valueList) { + this(Optional.empty(), value, valueList); + } + + public InPredicate(NodeLocation location, Expression value, Expression valueList) { + this(Optional.of(location), value, valueList); + } + + private InPredicate(Optional location, Expression value, Expression valueList) { + super(location); + this.value = value; + this.valueList = valueList; + } + + public Expression getValue() { + return value; + } + + public Expression getValueList() { + return valueList; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitInPredicate(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InPredicate that = (InPredicate) o; + return Objects.equals(value, that.value) + && Objects.equals(valueList, that.valueList); + } + + @Override + public int hashCode() { + return Objects.hash(value, valueList); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Intersect.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Intersect.java new file mode 100644 index 000000000000..1e544c89d540 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Intersect.java @@ -0,0 +1,70 @@ +/** + * Copyright 2017 Confluent Inc. 
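+ *
+ * AST node for the INTERSECT set operation over two or more relations, with the
+ * ALL/DISTINCT flag inherited from SetOperation.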
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class Intersect + extends SetOperation { + + private final List relations; + + public Intersect(List relations, boolean distinct) { + this(Optional.empty(), relations, distinct); + } + + public Intersect(NodeLocation location, List relations, boolean distinct) { + this(Optional.of(location), relations, distinct); + } + + private Intersect(Optional location, List relations, boolean distinct) { + super(location, distinct); + requireNonNull(relations, "relations is null"); + + this.relations = ImmutableList.copyOf(relations); + } + + public List getRelations() { + return relations; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitIntersect(this, context); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("relations", relations) + .add("distinct", isDistinct()) + .toString(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + Intersect o = (Intersect) obj; + return Objects.equals(relations, o.relations) + && Objects.equals(isDistinct(), o.isDistinct()); + } + + @Override + public int hashCode() { + return Objects.hash(relations, isDistinct()); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IntervalLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IntervalLiteral.java new file mode 100644 index 000000000000..a677fd70bfec --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IntervalLiteral.java @@ -0,0 +1,113 @@ +/** + * Copyright 2017 Confluent Inc. 
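+ *
+ * AST node for an interval literal such as {@code INTERVAL '3' MONTH} or, with an end
+ * field, {@code INTERVAL '2-6' YEAR TO MONTH}; the sign supplies a +1/-1 multiplier.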
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class IntervalLiteral + extends Literal { + + public enum Sign { + POSITIVE { + @Override + public int multiplier() { + return 1; + } + }, + NEGATIVE { + @Override + public int multiplier() { + return -1; + } + }; + + public abstract int multiplier(); + } + + public enum IntervalField { + YEAR, MONTH, DAY, HOUR, MINUTE, SECOND + } + + private final String value; + private final Sign sign; + private final IntervalField startField; + private final Optional endField; + + public IntervalLiteral(String value, Sign sign, IntervalField startField) { + this(Optional.empty(), value, sign, startField, Optional.empty()); + } + + public IntervalLiteral(String value, Sign sign, IntervalField startField, + Optional endField) { + this(Optional.empty(), value, sign, startField, endField); + } + + public IntervalLiteral(NodeLocation location, String value, Sign sign, IntervalField startField, + Optional endField) { + this(Optional.of(location), value, sign, startField, endField); + } + + private IntervalLiteral(Optional location, String value, Sign sign, + IntervalField startField, Optional endField) { + super(location); + requireNonNull(value, "value is null"); + requireNonNull(sign, "sign is null"); + requireNonNull(startField, "startField is null"); + requireNonNull(endField, "endField is null"); + + this.value = value; + this.sign = sign; + this.startField = startField; + this.endField = endField; + } + + public String getValue() { + return value; + } + + public Sign getSign() { + return sign; + } + + public IntervalField getStartField() { + return startField; + } + + public Optional getEndField() { + return endField; + } + + public boolean isYearToMonth() { + return startField == IntervalField.YEAR || startField == IntervalField.MONTH; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitIntervalLiteral(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(value, sign, startField, endField); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + IntervalLiteral other = (IntervalLiteral) obj; + return Objects.equals(this.value, other.value) + && Objects.equals(this.sign, other.sign) + && Objects.equals(this.startField, other.startField) + && Objects.equals(this.endField, other.endField); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IsNotNullPredicate.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IsNotNullPredicate.java new file mode 100644 index 000000000000..b9b42a708aca --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IsNotNullPredicate.java @@ -0,0 +1,57 @@ +/** + * Copyright 2017 Confluent Inc. 
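+ *
+ * AST node for {@code expression IS NOT NULL}.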
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class IsNotNullPredicate + extends Expression { + + private final Expression value; + + public IsNotNullPredicate(Expression value) { + this(Optional.empty(), value); + } + + public IsNotNullPredicate(NodeLocation location, Expression value) { + this(Optional.of(location), value); + } + + private IsNotNullPredicate(Optional location, Expression value) { + super(location); + requireNonNull(value, "value is null"); + this.value = value; + } + + public Expression getValue() { + return value; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitIsNotNullPredicate(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + IsNotNullPredicate that = (IsNotNullPredicate) o; + return Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IsNullPredicate.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IsNullPredicate.java new file mode 100644 index 000000000000..427a6c9ebd85 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/IsNullPredicate.java @@ -0,0 +1,57 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class IsNullPredicate + extends Expression { + + private final Expression value; + + public IsNullPredicate(Expression value) { + this(Optional.empty(), value); + } + + public IsNullPredicate(NodeLocation location, Expression value) { + this(Optional.of(location), value); + } + + private IsNullPredicate(Optional location, Expression value) { + super(location); + requireNonNull(value, "value is null"); + this.value = value; + } + + public Expression getValue() { + return value; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitIsNullPredicate(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + IsNullPredicate that = (IsNullPredicate) o; + return Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Join.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Join.java new file mode 100644 index 000000000000..a6d5b0cbe65f --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Join.java @@ -0,0 +1,103 @@ +/** + * Copyright 2017 Confluent Inc. 
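+ *
+ * AST node for a join between two relations. CROSS and IMPLICIT joins must not carry
+ * criteria; every other join type requires them. A minimal construction sketch
+ * ({@code left}, {@code right}, and {@code onExpression} are placeholders for
+ * previously built nodes):
+ *
+ * <pre>{@code
+ * // represents: left LEFT JOIN right ON onExpression
+ * Join join = new Join(Join.Type.LEFT, left, right, Optional.of(new JoinOn(onExpression)));
+ * }</pre>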
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; + +public class Join + extends Relation { + + public Join(Type type, Relation left, Relation right, Optional criteria) { + this(Optional.empty(), type, left, right, criteria); + } + + public Join(NodeLocation location, Type type, Relation left, Relation right, + Optional criteria) { + this(Optional.of(location), type, left, right, criteria); + } + + private Join(Optional location, Type type, Relation left, Relation right, + Optional criteria) { + super(location); + requireNonNull(left, "left is null"); + requireNonNull(right, "right is null"); + if ((type == Type.CROSS) || (type == Type.IMPLICIT)) { + checkArgument(!criteria.isPresent(), "%s join cannot have join criteria", type); + } else { + checkArgument(criteria.isPresent(), "No join criteria specified"); + } + + this.type = type; + this.left = left; + this.right = right; + this.criteria = criteria; + } + + public enum Type { + CROSS, INNER, LEFT, RIGHT, FULL, IMPLICIT + } + + private final Type type; + private final Relation left; + private final Relation right; + private final Optional criteria; + + public Type getType() { + return type; + } + + public Relation getLeft() { + return left; + } + + public Relation getRight() { + return right; + } + + public Optional getCriteria() { + return criteria; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitJoin(this, context); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("type", type) + .add("left", left) + .add("right", right) + .add("criteria", criteria) + .omitNullValues() + .toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if ((o == null) || (getClass() != o.getClass())) { + return false; + } + Join join = (Join) o; + return (type == join.type) + && Objects.equals(left, join.left) + && Objects.equals(right, join.right) + && Objects.equals(criteria, join.criteria); + } + + @Override + public int hashCode() { + return Objects.hash(type, left, right, criteria); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinCriteria.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinCriteria.java new file mode 100644 index 000000000000..0a04ae4318db --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinCriteria.java @@ -0,0 +1,18 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +public abstract class JoinCriteria { + + // Force subclasses to have a proper equals and hashcode implementation + @Override + public abstract boolean equals(Object obj); + + @Override + public abstract int hashCode(); + + @Override + public abstract String toString(); +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinOn.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinOn.java new file mode 100644 index 000000000000..cb835bc3dcb9 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinOn.java @@ -0,0 +1,48 @@ +/** + * Copyright 2017 Confluent Inc. 
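+ *
+ * Join criteria of the form {@code ON expression}, wrapping the boolean join condition.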
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class JoinOn + extends JoinCriteria { + + private final Expression expression; + + public JoinOn(Expression expression) { + this.expression = requireNonNull(expression, "expression is null"); + } + + public Expression getExpression() { + return expression; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + JoinOn o = (JoinOn) obj; + return Objects.equals(expression, o.expression); + } + + @Override + public int hashCode() { + return Objects.hash(expression); + } + + @Override + public String toString() { + return toStringHelper(this) + .addValue(expression) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinUsing.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinUsing.java new file mode 100644 index 000000000000..9000d6e059eb --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/JoinUsing.java @@ -0,0 +1,54 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; + +public class JoinUsing + extends JoinCriteria { + + private final List columns; + + public JoinUsing(List columns) { + requireNonNull(columns, "columns is null"); + checkArgument(!columns.isEmpty(), "columns is empty"); + this.columns = ImmutableList.copyOf(columns); + } + + public List getColumns() { + return columns; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + JoinUsing o = (JoinUsing) obj; + return Objects.equals(columns, o.columns); + } + + @Override + public int hashCode() { + return Objects.hash(columns); + } + + @Override + public String toString() { + return toStringHelper(this) + .addValue(columns) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/KsqlWindowExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/KsqlWindowExpression.java new file mode 100644 index 000000000000..fc5d8fecb555 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/KsqlWindowExpression.java @@ -0,0 +1,14 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Optional; + +public abstract class KsqlWindowExpression extends Node { + + protected KsqlWindowExpression(Optional location) { + super(location); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LambdaExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LambdaExpression.java new file mode 100644 index 000000000000..ec3bec3e23ad --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LambdaExpression.java @@ -0,0 +1,64 @@ +/** + * Copyright 2017 Confluent Inc. 
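+ *
+ * AST node for a lambda such as {@code (x, y) -> x + y}: the argument names plus a body
+ * expression.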
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class LambdaExpression + extends Expression { + + private final List arguments; + private final Expression body; + + public LambdaExpression(List arguments, Expression body) { + this(Optional.empty(), arguments, body); + } + + public LambdaExpression(NodeLocation location, List arguments, Expression body) { + this(Optional.of(location), arguments, body); + } + + private LambdaExpression(Optional location, List arguments, + Expression body) { + super(location); + this.arguments = requireNonNull(arguments, "arguments is null"); + this.body = requireNonNull(body, "body is null"); + } + + public List getArguments() { + return arguments; + } + + public Expression getBody() { + return body; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitLambdaExpression(this, context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + LambdaExpression that = (LambdaExpression) obj; + return Objects.equals(arguments, that.arguments) + && Objects.equals(body, that.body); + } + + @Override + public int hashCode() { + return Objects.hash(arguments, body); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LikePredicate.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LikePredicate.java new file mode 100644 index 000000000000..1169dc92da00 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LikePredicate.java @@ -0,0 +1,75 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class LikePredicate + extends Expression { + + private final Expression value; + private final Expression pattern; + private final Expression escape; + + public LikePredicate(Expression value, Expression pattern, Expression escape) { + this(Optional.empty(), value, pattern, escape); + } + + public LikePredicate(NodeLocation location, Expression value, Expression pattern, + Expression escape) { + this(Optional.of(location), value, pattern, escape); + } + + private LikePredicate(Optional location, Expression value, Expression pattern, + Expression escape) { + super(location); + requireNonNull(value, "value is null"); + requireNonNull(pattern, "pattern is null"); + + this.value = value; + this.pattern = pattern; + this.escape = escape; + } + + public Expression getValue() { + return value; + } + + public Expression getPattern() { + return pattern; + } + + public Expression getEscape() { + return escape; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitLikePredicate(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + LikePredicate that = (LikePredicate) o; + return Objects.equals(value, that.value) + && Objects.equals(pattern, that.pattern) + && Objects.equals(escape, that.escape); + } + + @Override + public int hashCode() { + return Objects.hash(value, pattern, escape); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListProperties.java 
b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListProperties.java new file mode 100644 index 000000000000..9c4234344bff --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListProperties.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class ListProperties extends Statement { + + public ListProperties(Optional location) { + super(location); + } + + @Override + public int hashCode() { + return Objects.hash("ListProperties"); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public String toString() { + return toStringHelper(this) + .toString(); + } +} \ No newline at end of file diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListQueries.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListQueries.java new file mode 100644 index 000000000000..b6e6291f9dfd --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListQueries.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class ListQueries extends Statement { + + public ListQueries(Optional location) { + super(location); + } + + @Override + public int hashCode() { + return Objects.hash("ShowQueries"); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public String toString() { + return toStringHelper(this) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListRegisteredTopics.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListRegisteredTopics.java new file mode 100644 index 000000000000..87bdb4807f8b --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListRegisteredTopics.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class ListRegisteredTopics extends Statement { + + public ListRegisteredTopics(Optional location) { + super(location); + } + + @Override + public int hashCode() { + return Objects.hash("ListRegisteredTopics"); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public String toString() { + return toStringHelper(this) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListStreams.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListStreams.java new file mode 100644 index 000000000000..00c5a87cb8f7 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListStreams.java @@ -0,0 +1,34 @@ +/** + * Copyright 2017 Confluent Inc. 
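+ *
+ * Statement node for LIST STREAMS; like the other List* statements here it carries no
+ * fields, so equality is identity-based.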
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class ListStreams + extends Statement { + + public ListStreams(Optional location) { + super(location); + } + + @Override + public int hashCode() { + return Objects.hash("ListStreams"); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public String toString() { + return toStringHelper(this) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListTables.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListTables.java new file mode 100644 index 000000000000..69644343660f --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListTables.java @@ -0,0 +1,32 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class ListTables extends Statement { + + public ListTables(Optional location) { + super(location); + } + + @Override + public int hashCode() { + return Objects.hash("ListTables"); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public String toString() { + return toStringHelper(this).toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListTopics.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListTopics.java new file mode 100644 index 000000000000..7add86185398 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ListTopics.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class ListTopics extends Statement { + + public ListTopics(Optional location) { + super(location); + } + + @Override + public int hashCode() { + return Objects.hash("ListTopics"); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public String toString() { + return toStringHelper(this) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Literal.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Literal.java new file mode 100644 index 000000000000..01e019135b3b --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Literal.java @@ -0,0 +1,20 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Optional; + +public abstract class Literal + extends Expression { + + protected Literal(Optional location) { + super(location); + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitLiteral(this, context); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LoadProperties.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LoadProperties.java new file mode 100644 index 000000000000..699a40142bda --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LoadProperties.java @@ -0,0 +1,33 @@ +/** + * Copyright 2017 Confluent Inc. 
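+ *
+ * Statement node for loading KSQL properties; a field-less marker class like the List*
+ * statements above.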
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class LoadProperties extends Statement { + + public LoadProperties(Optional location) { + super(location); + } + + @Override + public int hashCode() { + return Objects.hash("LoadProperties"); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public String toString() { + return toStringHelper(this) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LogicalBinaryExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LogicalBinaryExpression.java new file mode 100644 index 000000000000..826225ec5668 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LogicalBinaryExpression.java @@ -0,0 +1,99 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class LogicalBinaryExpression + extends Expression { + + public enum Type { + AND, OR; + + public Type flip() { + switch (this) { + case AND: + return LogicalBinaryExpression.Type.OR; + case OR: + return LogicalBinaryExpression.Type.AND; + default: + throw new IllegalArgumentException("Unsupported logical expression type: " + this); + } + } + } + + private final Type type; + private final Expression left; + private final Expression right; + + public LogicalBinaryExpression(Type type, Expression left, Expression right) { + this(Optional.empty(), type, left, right); + } + + public LogicalBinaryExpression(NodeLocation location, Type type, Expression left, + Expression right) { + this(Optional.of(location), type, left, right); + } + + private LogicalBinaryExpression(Optional location, Type type, Expression left, + Expression right) { + super(location); + requireNonNull(type, "type is null"); + requireNonNull(left, "left is null"); + requireNonNull(right, "right is null"); + + this.type = type; + this.left = left; + this.right = right; + } + + public Type getType() { + return type; + } + + public Expression getLeft() { + return left; + } + + public Expression getRight() { + return right; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitLogicalBinaryExpression(this, context); + } + + public static LogicalBinaryExpression and(Expression left, Expression right) { + return new LogicalBinaryExpression(Optional.empty(), Type.AND, left, right); + } + + public static LogicalBinaryExpression or(Expression left, Expression right) { + return new LogicalBinaryExpression(Optional.empty(), Type.OR, left, right); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + LogicalBinaryExpression that = (LogicalBinaryExpression) o; + return type == that.type + && Objects.equals(left, that.left) + && Objects.equals(right, that.right); + } + + @Override + public int hashCode() { + return Objects.hash(type, left, right); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LongLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LongLiteral.java new file mode 100644 index 000000000000..4768c3565c9e --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LongLiteral.java @@ -0,0 +1,67 @@ +/** + * Copyright 2017 Confluent Inc. 
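+ *
+ * AST node for a 64-bit integer literal, parsed from its source text; text that does not
+ * parse as a long raises ParsingException. For example:
+ *
+ * <pre>{@code
+ * LongLiteral five = new LongLiteral("5");
+ * long value = five.getValue();   // 5L
+ * }</pre>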
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LongLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LongLiteral.java
new file mode 100644
index 000000000000..4768c3565c9e
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/LongLiteral.java
@@ -0,0 +1,67 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import io.confluent.ksql.parser.ParsingException;
+
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+public class LongLiteral
+    extends Literal {
+
+  private final long value;
+
+  public LongLiteral(String value) {
+    this(Optional.empty(), value);
+  }
+
+  public LongLiteral(NodeLocation location, String value) {
+    this(Optional.of(location), value);
+  }
+
+  private LongLiteral(Optional<NodeLocation> location, String value) {
+    super(location);
+    requireNonNull(value, "value is null");
+    try {
+      this.value = Long.parseLong(value);
+    } catch (NumberFormatException e) {
+      throw new ParsingException("Invalid numeric literal: " + value);
+    }
+  }
+
+  public long getValue() {
+    return value;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitLongLiteral(this, context);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    LongLiteral that = (LongLiteral) o;
+    return value == that.value;
+  }
+
+  @Override
+  public int hashCode() {
+    // Same bit-mixing as Long.hashCode(value).
+    return (int) (value ^ (value >>> 32));
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NaturalJoin.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NaturalJoin.java
new file mode 100644
index 000000000000..e4b99ad8fef3
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NaturalJoin.java
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class NaturalJoin
+    extends JoinCriteria {
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    return (obj != null) && (getClass() == obj.getClass());
+  }
+
+  @Override
+  public int hashCode() {
+    return 0;
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this).toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Node.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Node.java
new file mode 100644
index 000000000000..5eedc5386e99
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Node.java
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+public abstract class Node {
+
+  private final Optional<NodeLocation> location;
+
+  protected Node(Optional<NodeLocation> location) {
+    this.location = requireNonNull(location, "location is null");
+  }
+
+  /**
+   * Accessible for {@link AstVisitor}; use {@link AstVisitor#process(Node, Object)} instead.
+   */
+  protected <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitNode(this, context);
+  }
+
+  public Optional<NodeLocation> getLocation() {
+    return location;
+  }
+
+  // Force subclasses to have a proper equals and hashCode implementation.
+  @Override
+  public abstract int hashCode();
+
+  @Override
+  public abstract boolean equals(Object obj);
+
+  @Override
+  public abstract String toString();
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NodeLocation.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NodeLocation.java
new file mode 100644
index 000000000000..8bda327a1d09
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NodeLocation.java
@@ -0,0 +1,24 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+public final class NodeLocation {
+
+  private final int line;
+  private final int charPositionInLine;
+
+  public NodeLocation(int line, int charPositionInLine) {
+    this.line = line;
+    this.charPositionInLine = charPositionInLine;
+  }
+
+  public int getLineNumber() {
+    return line;
+  }
+
+  public int getColumnNumber() {
+    // ANTLR reports a 0-based character position; expose a 1-based column.
+    return charPositionInLine + 1;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NotExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NotExpression.java
new file mode 100644
index 000000000000..18cd56879c9f
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NotExpression.java
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+public class NotExpression
+    extends Expression {
+
+  private final Expression value;
+
+  public NotExpression(Expression value) {
+    this(Optional.empty(), value);
+  }
+
+  public NotExpression(NodeLocation location, Expression value) {
+    this(Optional.of(location), value);
+  }
+
+  private NotExpression(Optional<NodeLocation> location, Expression value) {
+    super(location);
+    requireNonNull(value, "value is null");
+    this.value = value;
+  }
+
+  public Expression getValue() {
+    return value;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitNotExpression(this, context);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    NotExpression that = (NotExpression) o;
+    return Objects.equals(value, that.value);
+  }
+
+  @Override
+  public int hashCode() {
+    return value.hashCode();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NullIfExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NullIfExpression.java
new file mode 100644
index 000000000000..ba9453cde22d
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NullIfExpression.java
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +/** + * NULLIF(V1,V2): CASE WHEN V1=V2 THEN NULL ELSE V1 END + */ +public class NullIfExpression + extends Expression { + + private final Expression first; + private final Expression second; + + public NullIfExpression(Expression first, Expression second) { + this(Optional.empty(), first, second); + } + + public NullIfExpression(NodeLocation location, Expression first, Expression second) { + this(Optional.of(location), first, second); + } + + private NullIfExpression(Optional location, Expression first, Expression second) { + super(location); + this.first = first; + this.second = second; + } + + public Expression getFirst() { + return first; + } + + public Expression getSecond() { + return second; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitNullIfExpression(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NullIfExpression that = (NullIfExpression) o; + return Objects.equals(first, that.first) + && Objects.equals(second, that.second); + } + + @Override + public int hashCode() { + return Objects.hash(first, second); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NullLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NullLiteral.java new file mode 100644 index 000000000000..b6112d472132 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/NullLiteral.java @@ -0,0 +1,41 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Optional; + +public class NullLiteral + extends Literal { + + public NullLiteral() { + super(Optional.empty()); + } + + public NullLiteral(NodeLocation location) { + super(Optional.of(location)); + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitNullLiteral(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/PrintTopic.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/PrintTopic.java new file mode 100644 index 000000000000..c3482e60dacd --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/PrintTopic.java @@ -0,0 +1,81 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class PrintTopic extends Statement {
+
+  private final QualifiedName topic;
+  private final boolean fromBeginning;
+  private final LongLiteral intervalValue;
+
+  public PrintTopic(QualifiedName topic, boolean fromBeginning, LongLiteral intervalValue) {
+    this(Optional.empty(), topic, fromBeginning, intervalValue);
+  }
+
+  public PrintTopic(
+      NodeLocation location,
+      QualifiedName topic,
+      boolean fromBeginning,
+      LongLiteral intervalValue
+  ) {
+    this(Optional.of(location), topic, fromBeginning, intervalValue);
+  }
+
+  private PrintTopic(
+      Optional<NodeLocation> location,
+      QualifiedName topic,
+      boolean fromBeginning,
+      LongLiteral intervalValue
+  ) {
+    super(location);
+    this.topic = requireNonNull(topic, "topic is null");
+    this.fromBeginning = fromBeginning;
+    this.intervalValue = intervalValue;
+  }
+
+  public QualifiedName getTopic() {
+    return topic;
+  }
+
+  public boolean getFromBeginning() {
+    return fromBeginning;
+  }
+
+  public LongLiteral getIntervalValue() {
+    return intervalValue;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof PrintTopic)) {
+      return false;
+    }
+    PrintTopic that = (PrintTopic) o;
+    return getFromBeginning() == that.getFromBeginning()
+        && Objects.equals(getTopic(), that.getTopic())
+        && Objects.equals(getIntervalValue(), that.getIntervalValue());
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(getTopic(), getFromBeginning(), getIntervalValue());
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("topic", topic)
+        .add("fromBeginning", fromBeginning)
+        .add("intervalValue", intervalValue)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QualifiedName.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QualifiedName.java
new file mode 100644
index 000000000000..d621e79dcc83
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QualifiedName.java
@@ -0,0 +1,94 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+import java.util.List;
+import java.util.Optional;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.collect.Iterables.isEmpty;
+import static java.util.Objects.requireNonNull;
+
+public class QualifiedName {
+
+  private final List<String> parts;
+
+  public static QualifiedName of(String first, String... rest) {
+    requireNonNull(first, "first is null");
+    return of(ImmutableList.copyOf(Lists.asList(first, rest)));
+  }
+
+  public static QualifiedName of(String name) {
+    requireNonNull(name, "name is null");
+    return of(ImmutableList.of(name));
+  }
+
+  public static QualifiedName of(Iterable<String> parts) {
+    requireNonNull(parts, "parts is null");
+    checkArgument(!isEmpty(parts), "parts is empty");
+    return new QualifiedName(ImmutableList.copyOf(parts));
+  }
+
+  private QualifiedName(List<String> parts) {
+    this.parts = parts;
+  }
+
+  public List<String> getParts() {
+    return parts;
+  }
+
+  @Override
+  public String toString() {
+    return Joiner.on('.').join(parts);
+  }
+
+  /**
+   * For an identifier of the form "a.b.c.d", returns "a.b.c";
+   * for an identifier of the form "a", returns {@link Optional#empty()}.
+   */
+  public Optional<QualifiedName> getPrefix() {
+    if (parts.size() == 1) {
+      return Optional.empty();
+    }
+
+    List<String> subList = parts.subList(0, parts.size() - 1);
+    return Optional.of(new QualifiedName(subList));
+  }
+
+  public boolean hasSuffix(QualifiedName suffix) {
+    if (parts.size() < suffix.getParts().size()) {
+      return false;
+    }
+
+    int start = parts.size() - suffix.getParts().size();
+
+    return parts.subList(start, parts.size()).equals(suffix.getParts());
+  }
+
+  public String getSuffix() {
+    return Iterables.getLast(parts);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    return parts.equals(((QualifiedName) o).parts);
+  }
+
+  @Override
+  public int hashCode() {
+    return parts.hashCode();
+  }
+}
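QualifiedName backs every multi-part identifier in the grammar; its behavior follows directly from the methods above:

    QualifiedName name = QualifiedName.of("a", "b", "c");
    name.toString();                             // "a.b.c"
    name.getSuffix();                            // "c"
    name.getPrefix().get().toString();           // "a.b"
    name.hasSuffix(QualifiedName.of("b", "c"));  // true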
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QualifiedNameReference.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QualifiedNameReference.java
new file mode 100644
index 000000000000..fed7b3cb4998
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QualifiedNameReference.java
@@ -0,0 +1,58 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+public class QualifiedNameReference
+    extends Expression {
+
+  private final QualifiedName name;
+
+  public QualifiedNameReference(QualifiedName name) {
+    this(Optional.empty(), name);
+  }
+
+  public QualifiedNameReference(NodeLocation location, QualifiedName name) {
+    this(Optional.of(location), name);
+  }
+
+  private QualifiedNameReference(Optional<NodeLocation> location, QualifiedName name) {
+    super(location);
+    this.name = name;
+  }
+
+  public QualifiedName getName() {
+    return name;
+  }
+
+  public QualifiedName getSuffix() {
+    return QualifiedName.of(name.getSuffix());
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitQualifiedNameReference(this, context);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    QualifiedNameReference that = (QualifiedNameReference) o;
+    return Objects.equals(name, that.name);
+  }
+
+  @Override
+  public int hashCode() {
+    return name.hashCode();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Query.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Query.java
new file mode 100644
index 000000000000..e3173cbe948c
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Query.java
@@ -0,0 +1,108 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class Query
+    extends Statement {
+
+  private final Optional<With> with;
+  private final QueryBody queryBody;
+  private final List<SortItem> orderBy;
+  private final Optional<String> limit;
+
+  public Query(
+      Optional<With> with,
+      QueryBody queryBody,
+      List<SortItem> orderBy,
+      Optional<String> limit) {
+    this(Optional.empty(), with, queryBody, orderBy, limit);
+  }
+
+  public Query(
+      NodeLocation location,
+      Optional<With> with,
+      QueryBody queryBody,
+      List<SortItem> orderBy,
+      Optional<String> limit) {
+    this(Optional.of(location), with, queryBody, orderBy, limit);
+  }
+
+  private Query(
+      Optional<NodeLocation> location,
+      Optional<With> with,
+      QueryBody queryBody,
+      List<SortItem> orderBy,
+      Optional<String> limit) {
+    super(location);
+    requireNonNull(with, "with is null");
+    requireNonNull(queryBody, "queryBody is null");
+    requireNonNull(orderBy, "orderBy is null");
+    requireNonNull(limit, "limit is null");
+
+    this.with = with;
+    this.queryBody = queryBody;
+    this.orderBy = orderBy;
+    this.limit = limit;
+  }
+
+  public Optional<With> getWith() {
+    return with;
+  }
+
+  public QueryBody getQueryBody() {
+    return queryBody;
+  }
+
+  public List<SortItem> getOrderBy() {
+    return orderBy;
+  }
+
+  public Optional<String> getLimit() {
+    return limit;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitQuery(this, context);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("with", with.orElse(null))
+        .add("queryBody", queryBody)
+        .add("orderBy", orderBy)
+        .add("limit", limit.orElse(null))
+        .omitNullValues()
+        .toString();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    Query o = (Query) obj;
+    return Objects.equals(with, o.with)
+        && Objects.equals(queryBody, o.queryBody)
+        && Objects.equals(orderBy, o.orderBy)
+        && Objects.equals(limit, o.limit);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(with, queryBody, orderBy, limit);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QueryBody.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QueryBody.java
new file mode 100644
index 000000000000..dba82dd2d198
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QueryBody.java
@@ -0,0 +1,20 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Optional;
+
+public abstract class QueryBody
+    extends Relation {
+
+  protected QueryBody(Optional<NodeLocation> location) {
+    super(location);
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitQueryBody(this, context);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QuerySpecification.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QuerySpecification.java
new file mode 100644
index 000000000000..df00bc66137e
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/QuerySpecification.java
@@ -0,0 +1,166 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class QuerySpecification
+    extends QueryBody {
+
+  private final Select select;
+  private final Optional<Relation> into;
+  private final Optional<Relation> from;
+  private final Optional<WindowExpression> windowExpression;
+  private final Optional<Expression> where;
+  private final Optional<GroupBy> groupBy;
+  private final Optional<Expression> having;
+  private final List<SortItem> orderBy;
+  private final Optional<String> limit;
+
+  public QuerySpecification(
+      Select select,
+      Optional<Relation> into,
+      Optional<Relation> from,
+      Optional<WindowExpression> windowExpression,
+      Optional<Expression> where,
+      Optional<GroupBy> groupBy,
+      Optional<Expression> having,
+      List<SortItem> orderBy,
+      Optional<String> limit) {
+    this(Optional.empty(), select, into, from, windowExpression, where, groupBy,
+         having, orderBy, limit);
+  }
+
+  public QuerySpecification(
+      NodeLocation location,
+      Select select,
+      Optional<Relation> into,
+      Optional<Relation> from,
+      Optional<WindowExpression> windowExpression,
+      Optional<Expression> where,
+      Optional<GroupBy> groupBy,
+      Optional<Expression> having,
+      List<SortItem> orderBy,
+      Optional<String> limit) {
+    this(Optional.of(location), select, into, from, windowExpression, where, groupBy,
+         having, orderBy, limit);
+  }
+
+  private QuerySpecification(
+      Optional<NodeLocation> location,
+      Select select,
+      Optional<Relation> into,
+      Optional<Relation> from,
+      Optional<WindowExpression> windowExpression,
+      Optional<Expression> where,
+      Optional<GroupBy> groupBy,
+      Optional<Expression> having,
+      List<SortItem> orderBy,
+      Optional<String> limit) {
+    super(location);
+    requireNonNull(select, "select is null");
+    requireNonNull(into, "into is null");
+    requireNonNull(from, "from is null");
+    requireNonNull(windowExpression, "windowExpression is null");
+    requireNonNull(where, "where is null");
+    requireNonNull(groupBy, "groupBy is null");
+    requireNonNull(having, "having is null");
+    requireNonNull(orderBy, "orderBy is null");
+    requireNonNull(limit, "limit is null");
+
+    this.select = select;
+    this.into = into;
+    this.from = from;
+    this.windowExpression = windowExpression;
+    this.where = where;
+    this.groupBy = groupBy;
+    this.having = having;
+    this.orderBy = orderBy;
+    this.limit = limit;
+  }
+
+  public Select getSelect() {
+    return select;
+  }
+
+  public Optional<Relation> getInto() {
+    return into;
+  }
+
+  public Optional<Relation> getFrom() {
+    return from;
+  }
+
+  public Optional<WindowExpression> getWindowExpression() {
+    return windowExpression;
+  }
+
+  public Optional<Expression> getWhere() {
+    return where;
+  }
+
+  public Optional<GroupBy> getGroupBy() {
+    return groupBy;
+  }
+
+  public Optional<Expression> getHaving() {
+    return having;
+  }
+
+  public List<SortItem> getOrderBy() {
+    return orderBy;
+  }
+
+  public Optional<String> getLimit() {
+    return limit;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitQuerySpecification(this, context);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("select", select)
+        .add("into", into.orElse(null))
+        .add("from", from)
+        .add("window", windowExpression.orElse(null))
+        .add("where", where.orElse(null))
+        .add("groupBy", groupBy)
+        .add("having", having.orElse(null))
+        .add("orderBy", orderBy)
+        .add("limit", limit.orElse(null))
+        .toString();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    QuerySpecification o = (QuerySpecification) obj;
+    return Objects.equals(select, o.select)
+        && Objects.equals(into, o.into)
+        && Objects.equals(from, o.from)
+        && Objects.equals(windowExpression, o.windowExpression)
+        && Objects.equals(where, o.where)
+        && Objects.equals(groupBy, o.groupBy)
+        && Objects.equals(having, o.having)
+        && Objects.equals(orderBy, o.orderBy)
+        && Objects.equals(limit, o.limit);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(select, into, from, windowExpression, where, groupBy, having,
+                        orderBy, limit);
+  }
+}
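For orientation, a sketch of how a query's clauses land in this node's fields; the statement text is illustrative, and the mapping follows the constructor parameters above:

    // SELECT col1 FROM s WHERE col2 > 10 LIMIT 5;
    //   select            -> Select over a SingleColumn for col1
    //   from              -> Optional.of(<relation for "s">)
    //   where             -> Optional.of(<comparison col2 > 10>)
    //   into, windowExpression, groupBy, having -> Optional.empty()
    //   orderBy           -> empty list
    //   limit             -> Optional.of("5")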
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RegisterTopic.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RegisterTopic.java
new file mode 100644
index 000000000000..3c49faeef201
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RegisterTopic.java
@@ -0,0 +1,88 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class RegisterTopic
+    extends Statement {
+
+  private final QualifiedName name;
+  private final boolean notExists;
+  private final Map<String, Expression> properties;
+
+  public RegisterTopic(QualifiedName name, boolean notExists,
+                       Map<String, Expression> properties) {
+    this(Optional.empty(), name, notExists, properties);
+  }
+
+  public RegisterTopic(NodeLocation location, QualifiedName name,
+                       boolean notExists, Map<String, Expression> properties) {
+    this(Optional.of(location), name, notExists, properties);
+  }
+
+  private RegisterTopic(Optional<NodeLocation> location, QualifiedName name,
+                        boolean notExists,
+                        Map<String, Expression> properties) {
+    super(location);
+    this.name = requireNonNull(name, "name is null");
+    this.notExists = notExists;
+    this.properties = ImmutableMap.copyOf(requireNonNull(properties, "properties is null"));
+  }
+
+  public QualifiedName getName() {
+    return name;
+  }
+
+  public boolean isNotExists() {
+    return notExists;
+  }
+
+  public Map<String, Expression> getProperties() {
+    return properties;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    // Topic registration is dispatched through the visitor's visitCreateTopic hook.
+    return visitor.visitCreateTopic(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(name, notExists, properties);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    RegisterTopic o = (RegisterTopic) obj;
+    return Objects.equals(name, o.name)
+        && notExists == o.notExists
+        && Objects.equals(properties, o.properties);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("name", name)
+        .add("notExists", notExists)
+        .add("properties", properties)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Relation.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Relation.java
new file mode 100644
index 000000000000..87def7ae7ed2
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Relation.java
@@ -0,0 +1,20 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Optional;
+
+public abstract class Relation
+    extends Node {
+
+  protected Relation(Optional<NodeLocation> location) {
+    super(location);
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitRelation(this, context);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RenameColumn.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RenameColumn.java
new file mode 100644
index 000000000000..73874fdbd224
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RenameColumn.java
@@ -0,0 +1,80 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class RenameColumn + extends Statement { + + private final QualifiedName table; + private final String source; + private final String target; + + public RenameColumn(QualifiedName table, String source, String target) { + this(Optional.empty(), table, source, target); + } + + public RenameColumn(NodeLocation location, QualifiedName table, String source, String target) { + this(Optional.of(location), table, source, target); + } + + private RenameColumn(Optional location, QualifiedName table, String source, + String target) { + super(location); + this.table = requireNonNull(table, "table is null"); + this.source = requireNonNull(source, "source is null"); + this.target = requireNonNull(target, "target is null"); + } + + public QualifiedName getTable() { + return table; + } + + public String getSource() { + return source; + } + + public String getTarget() { + return target; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitRenameColumn(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RenameColumn that = (RenameColumn) o; + return Objects.equals(table, that.table) + && Objects.equals(source, that.source) + && Objects.equals(target, that.target); + } + + @Override + public int hashCode() { + return Objects.hash(table, source, target); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("table", table) + .add("source", source) + .add("target", target) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RenameTable.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RenameTable.java new file mode 100644 index 000000000000..8096e552d3b6 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RenameTable.java @@ -0,0 +1,71 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public final class RenameTable + extends Statement { + + private final QualifiedName source; + private final QualifiedName target; + + public RenameTable(QualifiedName source, QualifiedName target) { + this(Optional.empty(), source, target); + } + + public RenameTable(NodeLocation location, QualifiedName source, QualifiedName target) { + this(Optional.of(location), source, target); + } + + private RenameTable(Optional location, QualifiedName source, QualifiedName target) { + super(location); + this.source = requireNonNull(source, "source name is null"); + this.target = requireNonNull(target, "target name is null"); + } + + public QualifiedName getSource() { + return source; + } + + public QualifiedName getTarget() { + return target; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitRenameTable(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(source, target); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + RenameTable o = (RenameTable) obj; + return Objects.equals(source, o.source) + && Objects.equals(target, o.target); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("source", source) + .add("target", target) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Row.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Row.java new file mode 100644 index 000000000000..dd3591673ee3 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Row.java @@ -0,0 +1,59 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public final class Row + extends Expression { + + private final List items; + + public Row(List items) { + this(Optional.empty(), items); + } + + public Row(NodeLocation location, List items) { + this(Optional.of(location), items); + } + + private Row(Optional location, List items) { + super(location); + requireNonNull(items, "items is null"); + this.items = ImmutableList.copyOf(items); + } + + public List getItems() { + return items; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitRow(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(items); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Row other = (Row) obj; + return Objects.equals(this.items, other.items); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RunScript.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RunScript.java new file mode 100644 index 000000000000..15ce71f73dcf --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/RunScript.java @@ -0,0 +1,45 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.base.MoreObjects;
+
+import java.util.Objects;
+import java.util.Optional;
+
+public class RunScript extends Statement {
+
+  private final String schemaFilePath;
+
+  public RunScript(Optional<NodeLocation> location, String catalogFilePath) {
+    super(location);
+    // Strip the single quotes the parser leaves around the file path literal.
+    if (catalogFilePath.startsWith("'") && catalogFilePath.endsWith("'")) {
+      this.schemaFilePath = catalogFilePath.substring(1, catalogFilePath.length() - 1);
+    } else {
+      this.schemaFilePath = catalogFilePath;
+    }
+  }
+
+  public String getSchemaFilePath() {
+    return schemaFilePath;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(schemaFilePath);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    return this == obj;
+  }
+
+  @Override
+  public String toString() {
+    return MoreObjects.toStringHelper(this)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SampledRelation.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SampledRelation.java
new file mode 100644
index 000000000000..500db14c05b6
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SampledRelation.java
@@ -0,0 +1,113 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import com.google.common.collect.ImmutableList;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class SampledRelation
+    extends Relation {
+
+  public enum Type {
+    BERNOULLI,
+    POISSONIZED,
+    SYSTEM
+  }
+
+  private final Relation relation;
+  private final Type type;
+  private final Expression samplePercentage;
+  private final boolean rescaled;
+  private final Optional<List<Expression>> columnsToStratifyOn;
+
+  public SampledRelation(Relation relation, Type type, Expression samplePercentage,
+                         boolean rescaled, Optional<List<Expression>> columnsToStratifyOn) {
+    this(Optional.empty(), relation, type, samplePercentage, rescaled, columnsToStratifyOn);
+  }
+
+  public SampledRelation(NodeLocation location, Relation relation, Type type,
+                         Expression samplePercentage, boolean rescaled,
+                         Optional<List<Expression>> columnsToStratifyOn) {
+    this(Optional.of(location), relation, type, samplePercentage, rescaled, columnsToStratifyOn);
+  }
+
+  private SampledRelation(Optional<NodeLocation> location, Relation relation, Type type,
+                          Expression samplePercentage, boolean rescaled,
+                          Optional<List<Expression>> columnsToStratifyOn) {
+    super(location);
+    this.relation = requireNonNull(relation, "relation is null");
+    this.type = requireNonNull(type, "type is null");
+    this.samplePercentage = requireNonNull(samplePercentage, "samplePercentage is null");
+    this.rescaled = rescaled;
+
+    if (columnsToStratifyOn.isPresent()) {
+      this.columnsToStratifyOn =
+          Optional.<List<Expression>>of(ImmutableList.copyOf(columnsToStratifyOn.get()));
+    } else {
+      this.columnsToStratifyOn = columnsToStratifyOn;
+    }
+  }
+
+  public Relation getRelation() {
+    return relation;
+  }
+
+  public Type getType() {
+    return type;
+  }
+
+  public Expression getSamplePercentage() {
+    return samplePercentage;
+  }
+
+  public boolean isRescaled() {
+    return rescaled;
+  }
+
+  public Optional<List<Expression>> getColumnsToStratifyOn() {
+    return columnsToStratifyOn;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitSampledRelation(this, context);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("relation", relation)
+        .add("type", type)
.add("samplePercentage", samplePercentage) + .add("columnsToStratifyOn", columnsToStratifyOn) + .toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SampledRelation that = (SampledRelation) o; + return Objects.equals(relation, that.relation) + && Objects.equals(type, that.type) + && Objects.equals(samplePercentage, that.samplePercentage) + && Objects.equals(columnsToStratifyOn, that.columnsToStratifyOn); + } + + @Override + public int hashCode() { + return Objects.hash(relation, type, samplePercentage, columnsToStratifyOn); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SearchedCaseExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SearchedCaseExpression.java new file mode 100644 index 000000000000..e617463819df --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SearchedCaseExpression.java @@ -0,0 +1,70 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class SearchedCaseExpression + extends Expression { + + private final List whenClauses; + private final Optional defaultValue; + + public SearchedCaseExpression(List whenClauses, Optional defaultValue) { + this(Optional.empty(), whenClauses, defaultValue); + } + + public SearchedCaseExpression(NodeLocation location, List whenClauses, + Optional defaultValue) { + this(Optional.of(location), whenClauses, defaultValue); + } + + private SearchedCaseExpression(Optional location, List whenClauses, + Optional defaultValue) { + super(location); + requireNonNull(whenClauses, "whenClauses is null"); + requireNonNull(defaultValue, "defaultValue is null"); + this.whenClauses = ImmutableList.copyOf(whenClauses); + this.defaultValue = defaultValue; + } + + public List getWhenClauses() { + return whenClauses; + } + + public Optional getDefaultValue() { + return defaultValue; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitSearchedCaseExpression(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SearchedCaseExpression that = (SearchedCaseExpression) o; + return Objects.equals(whenClauses, that.whenClauses) + && Objects.equals(defaultValue, that.defaultValue); + } + + @Override + public int hashCode() { + return Objects.hash(whenClauses, defaultValue); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Select.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Select.java new file mode 100644 index 000000000000..376361b01ee1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Select.java @@ -0,0 +1,76 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class Select + extends Node { + + private final boolean distinct; + private final List selectItems; + + public Select(boolean distinct, List selectItems) { + this(Optional.empty(), distinct, selectItems); + } + + public Select(NodeLocation location, boolean distinct, List selectItems) { + this(Optional.of(location), distinct, selectItems); + } + + private Select(Optional location, boolean distinct, List selectItems) { + super(location); + this.distinct = distinct; + this.selectItems = ImmutableList.copyOf(requireNonNull(selectItems, "selectItems")); + } + + public boolean isDistinct() { + return distinct; + } + + public List getSelectItems() { + return selectItems; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitSelect(this, context); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("distinct", distinct) + .add("selectItems", selectItems) + .omitNullValues() + .toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Select select = (Select) o; + return (distinct == select.distinct) + && Objects.equals(selectItems, select.selectItems); + } + + @Override + public int hashCode() { + return Objects.hash(distinct, selectItems); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SelectItem.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SelectItem.java new file mode 100644 index 000000000000..3301406f3e3b --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SelectItem.java @@ -0,0 +1,15 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Optional; + +public abstract class SelectItem + extends Node { + + protected SelectItem(Optional location) { + super(location); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SessionWindowExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SessionWindowExpression.java new file mode 100644 index 000000000000..fa12d8fbe8d7 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SessionWindowExpression.java @@ -0,0 +1,60 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+public class SessionWindowExpression extends KsqlWindowExpression {
+
+  private final long gap;
+  private final WindowExpression.WindowUnit sizeUnit;
+
+  public SessionWindowExpression(long gap, WindowExpression.WindowUnit sizeUnit) {
+    this(Optional.empty(), "", gap, sizeUnit);
+  }
+
+  public SessionWindowExpression(NodeLocation location, String windowName,
+                                 long gap, WindowExpression.WindowUnit sizeUnit) {
+    this(Optional.of(location), windowName, gap, sizeUnit);
+  }
+
+  private SessionWindowExpression(Optional<NodeLocation> location, String windowName, long gap,
+                                  WindowExpression.WindowUnit sizeUnit) {
+    super(location);
+    // Note: windowName is accepted for symmetry with the other window types
+    // but is not retained by this node.
+    this.gap = gap;
+    this.sizeUnit = sizeUnit;
+  }
+
+  public long getGap() {
+    return gap;
+  }
+
+  public WindowExpression.WindowUnit getSizeUnit() {
+    return sizeUnit;
+  }
+
+  @Override
+  public String toString() {
+    return " SESSION ( " + gap + " " + sizeUnit + " ) ";
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(gap, sizeUnit);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    SessionWindowExpression sessionWindowExpression = (SessionWindowExpression) o;
+    return sessionWindowExpression.gap == gap && sessionWindowExpression.sizeUnit == sizeUnit;
+  }
+}
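A sketch of constructing a session window node; this assumes WindowExpression.WindowUnit exposes a SECONDS constant, which may differ from the actual enum:

    // Roughly: ... WINDOW SESSION ( 60 SECONDS ) ...
    SessionWindowExpression window =
        new SessionWindowExpression(60, WindowExpression.WindowUnit.SECONDS);
    String rendered = window.toString(); // " SESSION ( 60 SECONDS ) "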
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetOperation.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetOperation.java
new file mode 100644
index 000000000000..8112550d41d9
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetOperation.java
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.List;
+import java.util.Optional;
+
+public abstract class SetOperation
+    extends QueryBody {
+
+  private final boolean distinct;
+
+  protected SetOperation(Optional<NodeLocation> location, boolean distinct) {
+    super(location);
+    this.distinct = distinct;
+  }
+
+  public boolean isDistinct() {
+    return distinct;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitSetOperation(this, context);
+  }
+
+  public abstract List<Relation> getRelations();
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetProperty.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetProperty.java
new file mode 100644
index 000000000000..10448601deff
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetProperty.java
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class SetProperty extends Statement {
+
+  private final String propertyName;
+  private final String propertyValue;
+
+  public SetProperty(Optional<NodeLocation> location, String propertyName,
+                     String propertyValue) {
+    super(location);
+    requireNonNull(propertyName, "propertyName is null");
+    requireNonNull(propertyValue, "propertyValue is null");
+    this.propertyName = propertyName;
+    this.propertyValue = propertyValue;
+  }
+
+  public String getPropertyName() {
+    return propertyName;
+  }
+
+  public String getPropertyValue() {
+    return propertyValue;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(propertyName, propertyValue);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    SetProperty that = (SetProperty) o;
+    return propertyName.equals(that.propertyName)
+        && propertyValue.equals(that.propertyValue);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("propertyName", propertyName)
+        .add("propertyValue", propertyValue)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetSession.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetSession.java
new file mode 100644
index 000000000000..d1f11fc257ed
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SetSession.java
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class SetSession
+    extends Statement {
+
+  private final QualifiedName name;
+  private final Expression value;
+
+  public SetSession(QualifiedName name, Expression value) {
+    this(Optional.empty(), name, value);
+  }
+
+  public SetSession(NodeLocation location, QualifiedName name, Expression value) {
+    this(Optional.of(location), name, value);
+  }
+
+  private SetSession(Optional<NodeLocation> location, QualifiedName name, Expression value) {
+    super(location);
+    this.name = name;
+    this.value = value;
+  }
+
+  public QualifiedName getName() {
+    return name;
+  }
+
+  public Expression getValue() {
+    return value;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitSetSession(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(name, value);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    SetSession o = (SetSession) obj;
+    return Objects.equals(name, o.name)
+        && Objects.equals(value, o.value);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("name", name)
+        .add("value", value)
+        .toString();
+  }
+}
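SetProperty carries the name/value pair of a SET statement; a minimal construction sketch, where the property shown is a common Kafka consumer setting used purely for illustration:

    SetProperty set = new SetProperty(Optional.empty(), "auto.offset.reset", "earliest");
    set.getPropertyName();  // "auto.offset.reset"
    set.getPropertyValue(); // "earliest"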
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowCatalogs.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowCatalogs.java
new file mode 100644
index 000000000000..9558b876cc66
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowCatalogs.java
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public final class ShowCatalogs
+    extends Statement {
+
+  private final Optional<String> likePattern;
+
+  public ShowCatalogs(Optional<String> likePattern) {
+    this(Optional.empty(), likePattern);
+  }
+
+  public ShowCatalogs(NodeLocation location, Optional<String> likePattern) {
+    this(Optional.of(location), likePattern);
+  }
+
+  public ShowCatalogs(Optional<NodeLocation> location, Optional<String> likePattern) {
+    super(location);
+    this.likePattern = requireNonNull(likePattern, "likePattern is null");
+  }
+
+  public Optional<String> getLikePattern() {
+    return likePattern;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitShowCatalogs(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(likePattern);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    return Objects.equals(likePattern, ((ShowCatalogs) obj).likePattern);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("likePattern", likePattern.orElse(null))
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowColumns.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowColumns.java
new file mode 100644
index 000000000000..c279bc8fe37f
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowColumns.java
@@ -0,0 +1,69 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class ShowColumns
+    extends Statement {
+
+  private final QualifiedName table;
+  private final boolean isTopic;
+
+  public ShowColumns(QualifiedName table, boolean isTopic) {
+    this(Optional.empty(), table, isTopic);
+  }
+
+  public ShowColumns(NodeLocation location, QualifiedName table, boolean isTopic) {
+    this(Optional.of(location), table, isTopic);
+  }
+
+  private ShowColumns(Optional<NodeLocation> location, QualifiedName table, boolean isTopic) {
+    super(location);
+    this.table = requireNonNull(table, "table is null");
+    this.isTopic = isTopic;
+  }
+
+  public QualifiedName getTable() {
+    return table;
+  }
+
+  public boolean isTopic() {
+    return isTopic;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitShowColumns(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(table, isTopic);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    ShowColumns o = (ShowColumns) obj;
+    return Objects.equals(table, o.table)
+        && isTopic == o.isTopic;
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("table", table)
+        .add("isTopic", isTopic)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowCreate.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowCreate.java
new file mode 100644
index 000000000000..be577936bd24
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowCreate.java
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class ShowCreate + extends Statement { + + public enum Type { + TABLE, + VIEW + } + + private final Type type; + private final QualifiedName name; + + public ShowCreate(Type type, QualifiedName name) { + this(Optional.empty(), type, name); + } + + public ShowCreate(NodeLocation location, Type type, QualifiedName name) { + this(Optional.of(location), type, name); + } + + private ShowCreate(Optional location, Type type, QualifiedName name) { + super(location); + this.type = requireNonNull(type, "type is null"); + this.name = requireNonNull(name, "name is null"); + } + + public QualifiedName getName() { + return name; + } + + public Type getType() { + return type; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitShowCreate(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(type, name); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + ShowCreate o = (ShowCreate) obj; + return Objects.equals(name, o.name) && Objects.equals(type, o.type); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("type", type) + .add("name", name) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowFunctions.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowFunctions.java new file mode 100644 index 000000000000..a9dd38fd2ec5 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowFunctions.java @@ -0,0 +1,48 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class ShowFunctions + extends Statement { + + public ShowFunctions() { + this(Optional.empty()); + } + + public ShowFunctions(NodeLocation location) { + this(Optional.of(location)); + } + + private ShowFunctions(Optional location) { + super(location); + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitShowFunctions(this, context); + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + return (obj != null) && (getClass() == obj.getClass()); + } + + @Override + public String toString() { + return toStringHelper(this).toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowPartitions.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowPartitions.java new file mode 100644 index 000000000000..dc7ebc3128f5 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowPartitions.java @@ -0,0 +1,94 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class ShowPartitions + extends Statement { + + private final QualifiedName table; + private final Optional where; + private final List orderBy; + private final Optional limit; + + public ShowPartitions(QualifiedName table, Optional where, List orderBy, + Optional limit) { + this(Optional.empty(), table, where, orderBy, limit); + } + + public ShowPartitions(NodeLocation location, QualifiedName table, Optional where, + List orderBy, Optional limit) { + this(Optional.of(location), table, where, orderBy, limit); + } + + private ShowPartitions(Optional location, QualifiedName table, + Optional where, List orderBy, + Optional limit) { + super(location); + this.table = requireNonNull(table, "table is null"); + this.where = requireNonNull(where, "where is null"); + this.orderBy = ImmutableList.copyOf(requireNonNull(orderBy, "orderBy is null")); + this.limit = requireNonNull(limit, "limit is null"); + } + + public QualifiedName getTable() { + return table; + } + + public Optional getWhere() { + return where; + } + + public List getOrderBy() { + return orderBy; + } + + public Optional getLimit() { + return limit; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitShowPartitions(this, context); + } + + @Override + public int hashCode() { + return Objects.hash(table, where, orderBy, limit); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + ShowPartitions o = (ShowPartitions) obj; + return Objects.equals(table, o.table) + && Objects.equals(where, o.where) + && Objects.equals(orderBy, o.orderBy) + && Objects.equals(limit, o.limit); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("table", table) + .add("where", where) + .add("orderBy", orderBy) + .add("limit", limit) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowSchemas.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowSchemas.java new file mode 100644 index 000000000000..b5a25aaa0f87 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowSchemas.java @@ -0,0 +1,71 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+public class ShowSchemas
+    extends Statement {
+
+  private final Optional<String> catalog;
+  private final Optional<String> likePattern;
+
+  public ShowSchemas(Optional<String> catalog, Optional<String> likePattern) {
+    this(Optional.empty(), catalog, likePattern);
+  }
+
+  public ShowSchemas(NodeLocation location, Optional<String> catalog,
+                     Optional<String> likePattern) {
+    this(Optional.of(location), catalog, likePattern);
+  }
+
+  private ShowSchemas(Optional<NodeLocation> location, Optional<String> catalog,
+                      Optional<String> likePattern) {
+    super(location);
+    this.catalog = requireNonNull(catalog, "catalog is null");
+    this.likePattern = requireNonNull(likePattern, "likePattern is null");
+  }
+
+  public Optional<String> getCatalog() {
+    return catalog;
+  }
+
+  public Optional<String> getLikePattern() {
+    return likePattern;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitShowSchemas(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(catalog, likePattern);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    ShowSchemas o = (ShowSchemas) obj;
+    return Objects.equals(catalog, o.catalog)
+        && Objects.equals(likePattern, o.likePattern);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("catalog", catalog)
+        .add("likePattern", likePattern.orElse(null))
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowSession.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowSession.java
new file mode 100644
index 000000000000..59cb933faf93
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/ShowSession.java
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class ShowSession
+    extends Statement {
+
+  public ShowSession() {
+    this(Optional.empty());
+  }
+
+  public ShowSession(NodeLocation location) {
+    this(Optional.of(location));
+  }
+
+  private ShowSession(Optional<NodeLocation> location) {
+    super(location);
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitShowSession(this, context);
+  }
+
+  @Override
+  public int hashCode() {
+    return 0;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    return (obj != null) && (getClass() == obj.getClass());
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this).toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SimpleCaseExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SimpleCaseExpression.java
new file mode 100644
index 000000000000..9ec94ec026e9
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SimpleCaseExpression.java
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class SimpleCaseExpression + extends Expression { + + private final Expression operand; + private final List whenClauses; + private final Optional defaultValue; + + public SimpleCaseExpression(Expression operand, List whenClauses, + Optional defaultValue) { + this(Optional.empty(), operand, whenClauses, defaultValue); + } + + public SimpleCaseExpression(NodeLocation location, Expression operand, + List whenClauses, Optional defaultValue) { + this(Optional.of(location), operand, whenClauses, defaultValue); + } + + private SimpleCaseExpression(Optional location, Expression operand, + List whenClauses, Optional defaultValue) { + super(location); + requireNonNull(operand, "operand is null"); + requireNonNull(whenClauses, "whenClauses is null"); + + this.operand = operand; + this.whenClauses = ImmutableList.copyOf(whenClauses); + this.defaultValue = defaultValue; + } + + public Expression getOperand() { + return operand; + } + + public List getWhenClauses() { + return whenClauses; + } + + public Optional getDefaultValue() { + return defaultValue; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitSimpleCaseExpression(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SimpleCaseExpression that = (SimpleCaseExpression) o; + return Objects.equals(operand, that.operand) + && Objects.equals(whenClauses, that.whenClauses) + && Objects.equals(defaultValue, that.defaultValue); + } + + @Override + public int hashCode() { + return Objects.hash(operand, whenClauses, defaultValue); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SimpleGroupBy.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SimpleGroupBy.java new file mode 100644 index 000000000000..6b6f9bb8dad3 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SimpleGroupBy.java @@ -0,0 +1,74 @@ +/** + * Copyright 2017 Confluent Inc. 
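SimpleCaseExpression only models the syntax of CASE operand WHEN ... THEN ... ELSE ... END. As a reminder of the semantics it denotes, here is a rough, self-contained evaluation sketch (this is not ksql's evaluator; it simply shows that the operand is compared to each WHEN value in order and the first match wins):

    import java.util.AbstractMap.SimpleEntry;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.Objects;

    public class SimpleCaseSketch {
      static Object eval(Object operand, List<Map.Entry<Object, Object>> whenClauses,
                         Object defaultValue) {
        for (Map.Entry<Object, Object> when : whenClauses) {
          if (Objects.equals(operand, when.getKey())) {
            return when.getValue(); // first matching WHEN wins
          }
        }
        return defaultValue; // the ELSE branch; null when absent
      }

      public static void main(String[] args) {
        List<Map.Entry<Object, Object>> whens = Arrays.<Map.Entry<Object, Object>>asList(
            new SimpleEntry<>((Object) 1, (Object) "one"),
            new SimpleEntry<>((Object) 2, (Object) "two"));
        System.out.println(eval(2, whens, "other")); // two
        System.out.println(eval(9, whens, "other")); // other
      }
    }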
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class SimpleGroupBy + extends GroupingElement { + + private final List columns; + + public SimpleGroupBy(List simpleGroupByExpressions) { + this(Optional.empty(), simpleGroupByExpressions); + } + + public SimpleGroupBy(NodeLocation location, List simpleGroupByExpressions) { + this(Optional.of(location), simpleGroupByExpressions); + } + + private SimpleGroupBy(Optional location, + List simpleGroupByExpressions) { + super(location); + this.columns = requireNonNull(simpleGroupByExpressions); + } + + public List getColumnExpressions() { + return columns; + } + + @Override + public List> enumerateGroupingSets() { + return ImmutableList.of(ImmutableSet.copyOf(columns)); + } + + @Override + protected R accept(AstVisitor visitor, C context) { + return visitor.visitSimpleGroupBy(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SimpleGroupBy that = (SimpleGroupBy) o; + return Objects.equals(columns, that.columns); + } + + @Override + public int hashCode() { + return Objects.hash(columns); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("columns", columns) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SingleColumn.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SingleColumn.java new file mode 100644 index 000000000000..94796bc737aa --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SingleColumn.java @@ -0,0 +1,108 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.SchemaUtil; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class SingleColumn + extends SelectItem { + + private final Optional alias; + private final Expression expression; + + public SingleColumn(Expression expression) { + this(Optional.empty(), expression, Optional.empty()); + } + + public SingleColumn(Expression expression, Optional alias) { + this(Optional.empty(), expression, alias); + } + + public SingleColumn(Expression expression, String alias) { + this(Optional.empty(), expression, Optional.of(alias)); + } + + public SingleColumn(NodeLocation location, Expression expression, Optional alias) { + this(Optional.of(location), expression, alias); + } + + private SingleColumn(Optional location, Expression expression, + Optional alias) { + super(location); + requireNonNull(expression, "expression is null"); + requireNonNull(alias, "alias is null"); + + if (alias.isPresent()) { + if (alias.get().equalsIgnoreCase(SchemaUtil.ROWTIME_NAME)) { + String expressionStr = expression.toString(); + if (!expressionStr.substring(expressionStr.indexOf(".") + 1) + .equalsIgnoreCase(SchemaUtil.ROWTIME_NAME)) { + throw new KsqlException( + SchemaUtil.ROWTIME_NAME + " is a reserved token for implicit column." 
+ + " You cannot use it as an alias for a column."); + } + } + if (alias.get().equalsIgnoreCase(SchemaUtil.ROWKEY_NAME)) { + String expressionStr = expression.toString(); + if (!expressionStr.substring(expressionStr.indexOf(".") + 1).equalsIgnoreCase( + SchemaUtil.ROWKEY_NAME)) { + throw new KsqlException( + SchemaUtil.ROWKEY_NAME + " is a reserved token for implicit column." + + " You cannot use it as an alias for a column."); + } + } + } + + + this.expression = expression; + this.alias = alias; + } + + public Optional getAlias() { + return alias; + } + + public Expression getExpression() { + return expression; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + SingleColumn other = (SingleColumn) obj; + return Objects.equals(this.alias, other.alias) && Objects + .equals(this.expression, other.expression); + } + + @Override + public int hashCode() { + return Objects.hash(alias, expression); + } + + @Override + public String toString() { + if (alias.isPresent()) { + return expression.toString() + " " + alias.get(); + } + + return expression.toString(); + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitSingleColumn(this, context); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SortItem.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SortItem.java new file mode 100644 index 000000000000..ef245c04c639 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SortItem.java @@ -0,0 +1,89 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class SortItem + extends Node { + + public enum Ordering { + ASCENDING, DESCENDING + } + + public enum NullOrdering { + FIRST, LAST, UNDEFINED + } + + private final Expression sortKey; + private final Ordering ordering; + private final NullOrdering nullOrdering; + + public SortItem(Expression sortKey, Ordering ordering, NullOrdering nullOrdering) { + this(Optional.empty(), sortKey, ordering, nullOrdering); + } + + public SortItem(NodeLocation location, Expression sortKey, Ordering ordering, + NullOrdering nullOrdering) { + this(Optional.of(location), sortKey, ordering, nullOrdering); + } + + private SortItem(Optional location, Expression sortKey, Ordering ordering, + NullOrdering nullOrdering) { + super(location); + this.ordering = ordering; + this.sortKey = sortKey; + this.nullOrdering = nullOrdering; + } + + public Expression getSortKey() { + return sortKey; + } + + public Ordering getOrdering() { + return ordering; + } + + public NullOrdering getNullOrdering() { + return nullOrdering; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitSortItem(this, context); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("sortKey", sortKey) + .add("ordering", ordering) + .add("nullOrdering", nullOrdering) + .toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SortItem sortItem = (SortItem) o; + return Objects.equals(sortKey, sortItem.sortKey) + && (ordering == sortItem.ordering) + && (nullOrdering == sortItem.nullOrdering); + } + + @Override + public int hashCode() { + return 
Objects.hash(sortKey, ordering, nullOrdering); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/StackableAstVisitor.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/StackableAstVisitor.java new file mode 100644 index 000000000000..49a182d51c94 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/StackableAstVisitor.java @@ -0,0 +1,52 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.LinkedList; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class StackableAstVisitor + extends AstVisitor> { + + public R process(Node node, StackableAstVisitorContext context) { + context.push(node); + try { + return node.accept(this, context); + } finally { + context.pop(); + } + } + + public static class StackableAstVisitorContext { + + private final LinkedList stack = new LinkedList<>(); + private final C context; + + public StackableAstVisitorContext(C context) { + this.context = requireNonNull(context, "context is null"); + } + + public C getContext() { + return context; + } + + private void pop() { + stack.pop(); + } + + void push(Node node) { + stack.push(node); + } + + public Optional getPreviousNode() { + if (stack.size() > 1) { + return Optional.of(stack.get(1)); + } + return Optional.empty(); + } + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Statement.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Statement.java new file mode 100644 index 000000000000..fcccd1bec476 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Statement.java @@ -0,0 +1,20 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Optional; + +public abstract class Statement + extends Node { + + protected Statement(Optional location) { + super(location); + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitStatement(this, context); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Statements.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Statements.java new file mode 100644 index 000000000000..d80b83144f96 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Statements.java @@ -0,0 +1,54 @@ +/** + * Copyright 2017 Confluent Inc. 
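StackableAstVisitor above is easiest to understand as a push/visit/pop discipline: process() pushes the node before visiting and pops it in a finally block, so getPreviousNode() always exposes the parent of the node currently being visited. A standalone illustration using strings in place of Node (names and output are illustrative only):

    import java.util.LinkedList;
    import java.util.Optional;

    public class StackSketch {
      private static final LinkedList<String> stack = new LinkedList<>();

      static void process(String node, String... children) {
        stack.push(node);
        try {
          // Equivalent of getPreviousNode(): the element just below the top.
          Optional<String> parent =
              stack.size() > 1 ? Optional.of(stack.get(1)) : Optional.empty();
          System.out.println(node + " parent=" + parent.orElse("<none>"));
          for (String child : children) {
            process(child);
          }
        } finally {
          stack.pop(); // popped even if a visit throws, keeping the stack balanced
        }
      }

      public static void main(String[] args) {
        process("Query", "Select", "From");
      }
    }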
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class Statements extends Node {
+
+  public List<Statement> statementList;
+
+  public Statements(List<Statement> statementList) {
+    super(Optional.empty());
+    this.statementList = statementList;
+  }
+
+  protected Statements(Optional<NodeLocation> location, List<Statement> statementList) {
+    super(location);
+    this.statementList = statementList;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitStatements(this, context);
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("statementList", statementList)
+        .toString();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    Statements o = (Statements) obj;
+    return Objects.equals(statementList, o.statementList);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(statementList);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/StringLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/StringLiteral.java
new file mode 100644
index 000000000000..7e532c90b103
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/StringLiteral.java
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import io.airlift.slice.Slice;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static io.airlift.slice.Slices.utf8Slice;
+import static java.util.Objects.requireNonNull;
+
+public class StringLiteral
+    extends Literal {
+
+  private final String value;
+  private final Slice slice;
+
+  public StringLiteral(String value) {
+    this(Optional.empty(), value);
+  }
+
+  public StringLiteral(NodeLocation location, String value) {
+    this(Optional.of(location), value);
+  }
+
+  private StringLiteral(Optional<NodeLocation> location, String value) {
+    super(location);
+    requireNonNull(value, "value is null");
+    this.value = value;
+    this.slice = utf8Slice(value);
+  }
+
+  public String getValue() {
+    return value;
+  }
+
+  public Slice getSlice() {
+    return slice;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitStringLiteral(this, context);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    StringLiteral that = (StringLiteral) o;
+    return Objects.equals(value, that.value);
+  }
+
+  @Override
+  public int hashCode() {
+    return value.hashCode();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SubqueryExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SubqueryExpression.java
new file mode 100644
index 000000000000..388f5f94a61c
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SubqueryExpression.java
@@ -0,0 +1,54 @@
+/**
+ * Copyright 2017 Confluent Inc.
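Note on StringLiteral above: it eagerly caches a UTF-8 Slice via airlift's Slices.utf8Slice in the constructor, so getSlice() never re-encodes the string. A short usage fragment, assuming the patch's classes are on the classpath:

    // Fragment; assumes the StringLiteral class above.
    StringLiteral lit = new StringLiteral("hello");
    System.out.println(lit.getValue());          // hello
    System.out.println(lit.getSlice().length()); // 5 (UTF-8 byte length)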
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +public class SubqueryExpression + extends Expression { + + private final Query query; + + public SubqueryExpression(Query query) { + this(Optional.empty(), query); + } + + public SubqueryExpression(NodeLocation location, Query query) { + this(Optional.of(location), query); + } + + private SubqueryExpression(Optional location, Query query) { + super(location); + this.query = query; + } + + public Query getQuery() { + return query; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitSubqueryExpression(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SubqueryExpression that = (SubqueryExpression) o; + return Objects.equals(query, that.query); + } + + @Override + public int hashCode() { + return query.hashCode(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SubscriptExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SubscriptExpression.java new file mode 100644 index 000000000000..3509530d103d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SubscriptExpression.java @@ -0,0 +1,63 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class SubscriptExpression + extends Expression { + + private final Expression base; + private final Expression index; + + public SubscriptExpression(Expression base, Expression index) { + this(Optional.empty(), base, index); + } + + public SubscriptExpression(NodeLocation location, Expression base, Expression index) { + this(Optional.of(location), base, index); + } + + private SubscriptExpression(Optional location, Expression base, Expression index) { + super(location); + this.base = requireNonNull(base, "base is null"); + this.index = requireNonNull(index, "index is null"); + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitSubscriptExpression(this, context); + } + + public Expression getBase() { + return base; + } + + public Expression getIndex() { + return index; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SubscriptExpression that = (SubscriptExpression) o; + + return Objects.equals(this.base, that.base) && Objects.equals(this.index, that.index); + } + + @Override + public int hashCode() { + return Objects.hash(base, index); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SymbolReference.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SymbolReference.java new file mode 100644 index 000000000000..ee5ea8e02759 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/SymbolReference.java @@ -0,0 +1,45 @@ +/** + * Copyright 2017 Confluent Inc. 
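SubscriptExpression above models indexing syntax such as col[2]. A hypothetical construction of that expression follows; SymbolReference and SubscriptExpression come from this patch, while LongLiteral is assumed to exist elsewhere in the parser.tree package and is not shown in this hunk:

    // Hypothetical: roughly what the parser would build for the text "col[2]".
    Expression base = new SymbolReference("col");   // the array/map column
    Expression index = new LongLiteral("2");        // assumed literal class
    SubscriptExpression subscript = new SubscriptExpression(base, index);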
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +public class SymbolReference + extends Expression { + + private final String name; + + public SymbolReference(String name) { + super(Optional.empty()); + this.name = name; + } + + public String getName() { + return name; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitSymbolReference(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SymbolReference that = (SymbolReference) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Table.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Table.java new file mode 100644 index 000000000000..04f83535230a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Table.java @@ -0,0 +1,84 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +public class Table + extends QueryBody { + + public final boolean isStdOut; + Map properties; + private final QualifiedName name; + + public Table(QualifiedName name) { + this(Optional.empty(), name, false); + } + + public Table(QualifiedName name, boolean isStdOut) { + this(Optional.empty(), name, isStdOut); + } + + public Table(NodeLocation location, QualifiedName name) { + this(Optional.of(location), name, false); + } + + public Table(NodeLocation location, QualifiedName name, boolean isStdOut) { + this(Optional.of(location), name, isStdOut); + } + + private Table(Optional location, QualifiedName name, boolean isStdOut) { + super(location); + this.name = name; + this.isStdOut = isStdOut; + } + + public QualifiedName getName() { + return name; + } + + public boolean isStdOut() { + return isStdOut; + } + + public Map getProperties() { + return properties; + } + + public void setProperties( + Map properties) { + this.properties = properties; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitTable(this, context); + } + + @Override + public String toString() { + return name.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Table table = (Table) o; + return Objects.equals(name, table.name); + } + + @Override + public int hashCode() { + return name.hashCode(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TableElement.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TableElement.java new file mode 100644 index 000000000000..89effd3cd0b1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TableElement.java @@ -0,0 +1,71 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public final class TableElement + extends Node { + + private final String name; + private final String type; + + public TableElement(String name, String type) { + this(Optional.empty(), name, type); + } + + public TableElement(NodeLocation location, String name, String type) { + this(Optional.of(location), name, type); + } + + private TableElement(Optional location, String name, String type) { + super(location); + this.name = requireNonNull(name, "name is null"); + this.type = requireNonNull(type, "type is null"); + } + + public String getName() { + return name; + } + + public String getType() { + return type; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitTableElement(this, context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + TableElement o = (TableElement) obj; + return Objects.equals(this.name, o.name) + && Objects.equals(this.type, o.type); + } + + @Override + public int hashCode() { + return Objects.hash(name, type); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("name", name) + .add("type", type) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TableSubquery.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TableSubquery.java new file mode 100644 index 000000000000..7393dc2d3604 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TableSubquery.java @@ -0,0 +1,63 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; + +public class TableSubquery + extends QueryBody { + + private final Query query; + + public TableSubquery(Query query) { + this(Optional.empty(), query); + } + + public TableSubquery(NodeLocation location, Query query) { + this(Optional.of(location), query); + } + + private TableSubquery(Optional location, Query query) { + super(location); + this.query = query; + } + + public Query getQuery() { + return query; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitTableSubquery(this, context); + } + + @Override + public String toString() { + return toStringHelper(this) + .addValue(query) + .toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TableSubquery tableSubquery = (TableSubquery) o; + return Objects.equals(query, tableSubquery.query); + } + + @Override + public int hashCode() { + return query.hashCode(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TerminateQuery.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TerminateQuery.java new file mode 100644 index 000000000000..17d9d521b615 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TerminateQuery.java @@ -0,0 +1,49 @@ +/** + * Copyright 2017 Confluent Inc. 
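TableElement above is the carrier for column definitions in CREATE statements: one name/type pair per column. Using the class as declared, the column list of a hypothetical statement CREATE STREAM pageviews (VIEWTIME BIGINT, PAGEID VARCHAR) would be built roughly as:

    // Fragment; assumes java.util.Arrays, java.util.List and TableElement above.
    List<TableElement> columns = Arrays.asList(
        new TableElement("VIEWTIME", "BIGINT"),
        new TableElement("PAGEID", "VARCHAR"));
    columns.forEach(System.out::println); // e.g. TableElement{name=VIEWTIME, type=BIGINT}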
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+public class TerminateQuery extends Statement {
+
+  private final long queryId;
+
+  public TerminateQuery(long queryId) {
+    this(Optional.empty(), queryId);
+  }
+
+  public TerminateQuery(NodeLocation location, long queryId) {
+    this(Optional.of(location), queryId);
+  }
+
+  private TerminateQuery(Optional<NodeLocation> location, long queryId) {
+    super(location);
+    // queryId is a primitive and can never be null; no null check needed.
+    this.queryId = queryId;
+  }
+
+  public long getQueryId() {
+    return queryId;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(queryId);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    return queryId == ((TerminateQuery) obj).queryId;
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("queryId", queryId)
+        .toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TimeLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TimeLiteral.java
new file mode 100644
index 000000000000..9b124ecb3caf
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TimeLiteral.java
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+public class TimeLiteral
+    extends Literal {
+
+  private final String value;
+
+  public TimeLiteral(String value) {
+    this(Optional.empty(), value);
+  }
+
+  public TimeLiteral(NodeLocation location, String value) {
+    this(Optional.of(location), value);
+  }
+
+  private TimeLiteral(Optional<NodeLocation> location, String value) {
+    super(location);
+    requireNonNull(value, "value is null");
+    this.value = value;
+  }
+
+  public String getValue() {
+    return value;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitTimeLiteral(this, context);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    TimeLiteral that = (TimeLiteral) o;
+    return Objects.equals(value, that.value);
+  }
+
+  @Override
+  public int hashCode() {
+    return value.hashCode();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TimestampLiteral.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TimestampLiteral.java
new file mode 100644
index 000000000000..644df0438764
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TimestampLiteral.java
@@ -0,0 +1,58 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public class TimestampLiteral + extends Literal { + + private final String value; + + public TimestampLiteral(String value) { + this(Optional.empty(), value); + } + + public TimestampLiteral(NodeLocation location, String value) { + this(Optional.of(location), value); + } + + private TimestampLiteral(Optional location, String value) { + super(location); + requireNonNull(value, "value is null"); + + this.value = value; + } + + public String getValue() { + return value; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitTimestampLiteral(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TimestampLiteral that = (TimestampLiteral) o; + return Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TumblingWindowExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TumblingWindowExpression.java new file mode 100644 index 000000000000..a9db5df418d3 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/TumblingWindowExpression.java @@ -0,0 +1,60 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +public class TumblingWindowExpression extends KsqlWindowExpression { + + private final long size; + private final WindowExpression.WindowUnit sizeUnit; + + public TumblingWindowExpression(long size, WindowExpression.WindowUnit sizeUnit) { + this(Optional.empty(), "", size, sizeUnit); + } + + public TumblingWindowExpression(NodeLocation location, String windowName, + long size, WindowExpression.WindowUnit sizeUnit) { + this(Optional.of(location), windowName, size, sizeUnit); + } + + private TumblingWindowExpression(Optional location, String windowName, long size, + WindowExpression.WindowUnit sizeUnit) { + super(location); + this.size = size; + this.sizeUnit = sizeUnit; + } + + public long getSize() { + return size; + } + + public WindowExpression.WindowUnit getSizeUnit() { + return sizeUnit; + } + + @Override + public String toString() { + return " TUMBLING ( SIZE " + size + " " + sizeUnit + " ) "; + } + + @Override + public int hashCode() { + return Objects.hash(size, sizeUnit); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + TumblingWindowExpression tumblingWindowExpression = (TumblingWindowExpression) o; + return tumblingWindowExpression.size == size && tumblingWindowExpression.sizeUnit == sizeUnit; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Union.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Union.java new file mode 100644 index 000000000000..33544b8a7d90 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Union.java @@ -0,0 +1,70 @@ +/** + * Copyright 2017 Confluent Inc. 
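For readers unfamiliar with the terminology: a tumbling window, as carried by TumblingWindowExpression above, partitions time into fixed-size, non-overlapping, epoch-aligned buckets, so each record belongs to exactly one window. A self-contained sketch of that bucketing (illustrative only, not ksql code):

    public class TumblingSketch {
      static long windowStart(long timestampMs, long windowSizeMs) {
        return (timestampMs / windowSizeMs) * windowSizeMs; // floor to the boundary
      }

      public static void main(String[] args) {
        long sizeMs = 60_000L; // TUMBLING ( SIZE 1 MINUTE )
        // A record at t=125000 ms falls in the window [120000, 180000).
        System.out.println(windowStart(125_000L, sizeMs)); // 120000
      }
    }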
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class Union + extends SetOperation { + + private final List relations; + + public Union(List relations, boolean distinct) { + this(Optional.empty(), relations, distinct); + } + + public Union(NodeLocation location, List relations, boolean distinct) { + this(Optional.of(location), relations, distinct); + } + + private Union(Optional location, List relations, boolean distinct) { + super(location, distinct); + requireNonNull(relations, "relations is null"); + + this.relations = ImmutableList.copyOf(relations); + } + + public List getRelations() { + return relations; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitUnion(this, context); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("relations", relations) + .add("distinct", isDistinct()) + .toString(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + Union o = (Union) obj; + return Objects.equals(relations, o.relations) + && Objects.equals(isDistinct(), o.isDistinct()); + } + + @Override + public int hashCode() { + return Objects.hash(relations, isDistinct()); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/UnsetProperty.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/UnsetProperty.java new file mode 100644 index 000000000000..d091ec97ab36 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/UnsetProperty.java @@ -0,0 +1,48 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class UnsetProperty extends Statement { + + private final String propertyName; + + public UnsetProperty(Optional location, String propertyName) { + super(location); + requireNonNull(propertyName, "propertyName is null"); + this.propertyName = propertyName; + } + + public String getPropertyName() { + return propertyName; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof UnsetProperty)) { + return false; + } + UnsetProperty that = (UnsetProperty) o; + return Objects.equals(getPropertyName(), that.getPropertyName()); + } + + @Override + public int hashCode() { + return Objects.hash(getPropertyName()); + } + + @Override + public String toString() { + return toStringHelper(this).toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Values.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Values.java new file mode 100644 index 000000000000..0a5459a116a6 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Values.java @@ -0,0 +1,65 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Objects.requireNonNull; + +public final class Values + extends QueryBody { + + private final List rows; + + public Values(List rows) { + this(Optional.empty(), rows); + } + + public Values(NodeLocation location, List rows) { + this(Optional.of(location), rows); + } + + private Values(Optional location, List rows) { + super(location); + requireNonNull(rows, "rows is null"); + this.rows = ImmutableList.copyOf(rows); + } + + public List getRows() { + return rows; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitValues(this, context); + } + + @Override + public String toString() { + return "(" + Joiner.on(", ").join(rows) + ")"; + } + + @Override + public int hashCode() { + return Objects.hash(rows); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Values other = (Values) obj; + return Objects.equals(this.rows, other.rows); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WhenClause.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WhenClause.java new file mode 100644 index 000000000000..2ab3e9d4a7f3 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WhenClause.java @@ -0,0 +1,61 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +public class WhenClause + extends Expression { + + private final Expression operand; + private final Expression result; + + public WhenClause(Expression operand, Expression result) { + this(Optional.empty(), operand, result); + } + + public WhenClause(NodeLocation location, Expression operand, Expression result) { + this(Optional.of(location), operand, result); + } + + private WhenClause(Optional location, Expression operand, Expression result) { + super(location); + this.operand = operand; + this.result = result; + } + + public Expression getOperand() { + return operand; + } + + public Expression getResult() { + return result; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitWhenClause(this, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WhenClause that = (WhenClause) o; + return Objects.equals(operand, that.operand) + && Objects.equals(result, that.result); + } + + @Override + public int hashCode() { + return Objects.hash(operand, result); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Window.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Window.java new file mode 100644 index 000000000000..b43892f5cef8 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/Window.java @@ -0,0 +1,62 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+public class Window
+    extends Node {
+
+  private final WindowExpression windowExpression;
+
+  public Window(String windowName, WindowExpression windowExpression) {
+    this(Optional.empty(), windowName, windowExpression);
+  }
+
+  public Window(NodeLocation location, String windowName, WindowExpression windowExpression) {
+    this(Optional.of(location), windowName, windowExpression);
+  }
+
+  private Window(Optional<NodeLocation> location, String windowName,
+                 WindowExpression windowExpression) {
+    super(location);
+    this.windowExpression = requireNonNull(windowExpression, "windowExpression is null");
+  }
+
+  public WindowExpression getWindowExpression() {
+    return windowExpression;
+  }
+
+  @Override
+  public <R, C> R accept(AstVisitor<R, C> visitor, C context) {
+    return visitor.visitWindow(this, context);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    Window o = (Window) obj;
+    return Objects.equals(windowExpression, o.windowExpression);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(windowExpression);
+  }
+
+  @Override
+  public String toString() {
+    return " WINDOW " + windowExpression.toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowExpression.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowExpression.java
new file mode 100644
index 000000000000..1b1c48b6afc8
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowExpression.java
@@ -0,0 +1,80 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.parser.tree;
+
+import java.util.Objects;
+import java.util.Optional;
+
+public class WindowExpression extends Node {
+
+  private final String windowName;
+  private final KsqlWindowExpression ksqlWindowExpression;
+
+  public WindowExpression(String windowName, KsqlWindowExpression ksqlWindowExpression) {
+    this(Optional.empty(), windowName, ksqlWindowExpression);
+  }
+
+  protected WindowExpression(Optional<NodeLocation> location, String windowName,
+                             KsqlWindowExpression ksqlWindowExpression) {
+    super(location);
+    this.windowName = windowName;
+    this.ksqlWindowExpression = ksqlWindowExpression;
+  }
+
+  public String getWindowName() {
+    return windowName;
+  }
+
+  public KsqlWindowExpression getKsqlWindowExpression() {
+    return ksqlWindowExpression;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if ((obj == null) || (getClass() != obj.getClass())) {
+      return false;
+    }
+    WindowExpression o = (WindowExpression) obj;
+    // Compare both fields so that equals stays consistent with hashCode.
+    return Objects.equals(windowName, o.windowName)
+        && Objects.equals(ksqlWindowExpression, o.ksqlWindowExpression);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(windowName, ksqlWindowExpression);
+  }
+
+  @Override
+  public String toString() {
+    return " WINDOW " + windowName + " " + ksqlWindowExpression.toString();
+  }
+
+  public enum WindowUnit { DAY, HOUR, MINUTE, SECOND, MILLISECOND }
+
+  public static WindowUnit getWindowUnit(String windowUnitString) {
+    switch (windowUnitString) {
+      case "DAY":
+      case "DAYS":
+        return WindowUnit.DAY;
+      case "HOUR":
+      case "HOURS":
+        return WindowUnit.HOUR;
+      case "MINUTE":
+      case "MINUTES":
+        return WindowUnit.MINUTE;
+      case "SECOND":
+      case "SECONDS":
+        return WindowUnit.SECOND;
+      case "MILLISECOND":
+      case "MILLISECONDS":
+        return
WindowUnit.MILLISECOND; + default: + return null; + } + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowFrame.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowFrame.java new file mode 100644 index 000000000000..5ab8f1e6cfb1 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowFrame.java @@ -0,0 +1,84 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class WindowFrame + extends Node { + + public enum Type { + RANGE, ROWS + } + + private final Type type; + private final FrameBound start; + private final Optional end; + + public WindowFrame(Type type, FrameBound start, Optional end) { + this(Optional.empty(), type, start, end); + } + + public WindowFrame(NodeLocation location, Type type, FrameBound start, Optional end) { + this(Optional.of(location), type, start, end); + } + + private WindowFrame(Optional location, Type type, FrameBound start, + Optional end) { + super(location); + this.type = requireNonNull(type, "type is null"); + this.start = requireNonNull(start, "start is null"); + this.end = requireNonNull(end, "end is null"); + } + + public Type getType() { + return type; + } + + public FrameBound getStart() { + return start; + } + + public Optional getEnd() { + return end; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitWindowFrame(this, context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + WindowFrame o = (WindowFrame) obj; + return Objects.equals(type, o.type) + && Objects.equals(start, o.start) + && Objects.equals(end, o.end); + } + + @Override + public int hashCode() { + return Objects.hash(type, start, end); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("type", type) + .add("start", start) + .add("end", end) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowName.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowName.java new file mode 100644 index 000000000000..0194c26b2dd9 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WindowName.java @@ -0,0 +1,49 @@ +/** + * Copyright 2017 Confluent Inc. 
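getWindowUnit() above only normalizes the keyword spelling; a consumer of the AST still has to turn a (size, unit) pair into an actual duration. One plausible conversion, written as an assumption rather than code from this patch:

    public class WindowUnitSketch {
      // Assumed helper: normalize a window size to milliseconds.
      static long toMillis(long size, String unit) {
        switch (unit) {
          case "MILLISECOND": return size;
          case "SECOND":      return size * 1_000L;
          case "MINUTE":      return size * 60_000L;
          case "HOUR":        return size * 3_600_000L;
          case "DAY":         return size * 86_400_000L;
          default:
            throw new IllegalArgumentException("Unknown window unit: " + unit);
        }
      }

      public static void main(String[] args) {
        System.out.println(toMillis(30, "SECOND")); // 30000
      }
    }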
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.Objects; +import java.util.Optional; + + +public class WindowName extends Node { + + private String windowName; + + public WindowName(String windowName) { + this(Optional.empty(), windowName); + } + + public WindowName(NodeLocation location, String windowName) { + this(Optional.of(location), windowName); + } + + private WindowName(Optional location, String windowName) { + super(location); + this.windowName = windowName; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + WindowName o = (WindowName) obj; + return Objects.equals(windowName, o.windowName); + } + + @Override + public int hashCode() { + return Objects.hash(windowName); + } + + @Override + public String toString() { + return windowName; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/With.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/With.java new file mode 100644 index 000000000000..de7e31d50d77 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/With.java @@ -0,0 +1,78 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.parser.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; + +public class With + extends Node { + + private final boolean recursive; + private final List queries; + + public With(boolean recursive, List queries) { + this(Optional.empty(), recursive, queries); + } + + public With(NodeLocation location, boolean recursive, List queries) { + this(Optional.of(location), recursive, queries); + } + + private With(Optional location, boolean recursive, List queries) { + super(location); + requireNonNull(queries, "queries is null"); + checkArgument(!queries.isEmpty(), "queries is empty"); + + this.recursive = recursive; + this.queries = ImmutableList.copyOf(queries); + } + + public boolean isRecursive() { + return recursive; + } + + public List getQueries() { + return queries; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitWith(this, context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + With o = (With) obj; + return Objects.equals(recursive, o.recursive) + && Objects.equals(queries, o.queries); + } + + @Override + public int hashCode() { + return Objects.hash(recursive, queries); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("recursive", recursive) + .add("queries", queries) + .toString(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WithQuery.java b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WithQuery.java new file mode 100644 index 000000000000..0493ce83222d --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/parser/tree/WithQuery.java @@ -0,0 +1,83 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.parser.tree; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class WithQuery + extends Node { + + private final String name; + private final Query query; + private final Optional> columnNames; + + public WithQuery(String name, Query query, Optional> columnNames) { + this(Optional.empty(), name, query, columnNames); + } + + public WithQuery(NodeLocation location, String name, Query query, + Optional> columnNames) { + this(Optional.of(location), name, query, columnNames); + } + + private WithQuery(Optional location, String name, Query query, + Optional> columnNames) { + super(location); + this.name = QualifiedName.of(requireNonNull(name, "name is null")).getParts().get(0); + this.query = requireNonNull(query, "query is null"); + this.columnNames = requireNonNull(columnNames, "columnNames is null"); + } + + public String getName() { + return name; + } + + public Query getQuery() { + return query; + } + + public Optional> getColumnNames() { + return columnNames; + } + + @Override + public R accept(AstVisitor visitor, C context) { + return visitor.visitWithQuery(this, context); + } + + @Override + public String toString() { + return toStringHelper(this) + .add("name", name) + .add("query", query) + .add("columnNames", columnNames) + .omitNullValues() + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(name, query, columnNames); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + WithQuery o = (WithQuery) obj; + return Objects.equals(name, o.name) + && Objects.equals(query, o.query) + && Objects.equals(columnNames, o.columnNames); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/physical/GenericRow.java b/ksql-core/src/main/java/io/confluent/ksql/physical/GenericRow.java new file mode 100644 index 000000000000..9d564785eb71 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/physical/GenericRow.java @@ -0,0 +1,71 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.physical;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+public class GenericRow {
+
+  private final List<Object> columns;
+
+  public GenericRow() {
+    columns = new ArrayList<>();
+  }
+
+  public GenericRow(List<Object> columns) {
+    Objects.requireNonNull(columns);
+    this.columns = columns;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder stringBuilder = new StringBuilder("[ ");
+    for (int i = 0; i < columns.size(); i++) {
+      Object obj = columns.get(i);
+      if (obj == null) {
+        stringBuilder.append("null");
+      } else if (obj.getClass().isArray()) {
+        stringBuilder.append(Arrays.toString((Object[]) obj));
+      } else if (obj instanceof String) {
+        stringBuilder.append("'").append(obj).append("'");
+      } else {
+        stringBuilder.append(obj);
+      }
+      if (i < columns.size() - 1) {
+        stringBuilder.append(" | ");
+      }
+    }
+    stringBuilder.append(" ]");
+    return stringBuilder.toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    GenericRow that = (GenericRow) o;
+    if (columns.size() != that.columns.size()) {
+      return false;
+    }
+    // For now, rows are compared via their string form, since comparing
+    // doubles directly causes issues.
+    return this.toString().equals(that.toString());
+  }
+
+  @Override
+  public int hashCode() {
+    // Hash the same string form that equals() compares, keeping the
+    // hashCode/equals contract intact.
+    return toString().hashCode();
+  }
+
+  public List<Object> getColumns() {
+    return columns;
+  }
+
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/physical/PhysicalPlanBuilder.java b/ksql-core/src/main/java/io/confluent/ksql/physical/PhysicalPlanBuilder.java
new file mode 100644
index 000000000000..41f08e2ab29d
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/physical/PhysicalPlanBuilder.java
@@ -0,0 +1,693 @@
+/**
+ * Copyright 2017 Confluent Inc.
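A quick usage fragment for GenericRow above (assuming it is on the classpath), showing the rendering its toString produces: columns separated by ' | ', strings single-quoted, and nulls printed literally.

    // Fragment; assumes java.util.Arrays and the GenericRow class above.
    GenericRow row = new GenericRow(Arrays.<Object>asList("alice", 42L, null));
    System.out.println(row); // prints: [ 'alice' | 42 | null ]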
+ **/ + +package io.confluent.ksql.physical; + +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.function.KsqlAggregateFunction; +import io.confluent.ksql.function.KsqlFunctions; +import io.confluent.ksql.function.udaf.KudafAggregator; +import io.confluent.ksql.function.udaf.KudafInitializer; +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.KsqlTable; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.metastore.MetastoreUtil; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.rewrite.AggregateExpressionRewriter; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.FunctionCall; +import io.confluent.ksql.planner.plan.AggregateNode; +import io.confluent.ksql.planner.plan.FilterNode; +import io.confluent.ksql.planner.plan.JoinNode; +import io.confluent.ksql.planner.plan.KsqlBareOutputNode; +import io.confluent.ksql.planner.plan.KsqlStructuredDataOutputNode; +import io.confluent.ksql.planner.plan.OutputNode; +import io.confluent.ksql.planner.plan.PlanNode; +import io.confluent.ksql.planner.plan.ProjectNode; +import io.confluent.ksql.planner.plan.SourceNode; +import io.confluent.ksql.planner.plan.StructuredDataSourceNode; +import io.confluent.ksql.serde.KsqlTopicSerDe; +import io.confluent.ksql.serde.avro.KsqlAvroTopicSerDe; +import io.confluent.ksql.structured.SchemaKGroupedStream; +import io.confluent.ksql.structured.SchemaKStream; +import io.confluent.ksql.structured.SchemaKTable; +import io.confluent.ksql.util.KafkaTopicClient; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.Pair; +import io.confluent.ksql.util.SchemaUtil; +import io.confluent.ksql.util.SerDeUtil; +import io.confluent.ksql.util.WindowedSerde; +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KStreamBuilder; +import org.apache.kafka.streams.kstream.KTable; +import org.apache.kafka.streams.kstream.KeyValueMapper; +import org.apache.kafka.streams.kstream.Reducer; +import org.apache.kafka.streams.kstream.ValueTransformer; +import org.apache.kafka.streams.kstream.ValueTransformerSupplier; +import org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.TopologyBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +public class PhysicalPlanBuilder { + + private static final Logger log = LoggerFactory.getLogger(PhysicalPlanBuilder.class); + + private final KStreamBuilder builder; + private final KsqlConfig ksqlConfig; + private final KafkaTopicClient kafkaTopicClient; + + private OutputNode planSink = null; + + public PhysicalPlanBuilder(final KStreamBuilder builder, final KsqlConfig ksqlConfig, KafkaTopicClient kafkaTopicClient) { + this.builder = builder; + this.ksqlConfig = ksqlConfig; + this.kafkaTopicClient = 
kafkaTopicClient; + } + + public SchemaKStream buildPhysicalPlan(final PlanNode logicalPlanRoot) throws Exception { + return kafkaStreamsDsl(logicalPlanRoot); + } + + private SchemaKStream kafkaStreamsDsl(final PlanNode planNode) throws Exception { + return kafkaStreamsDsl(planNode, new HashMap<>()); + } + + private SchemaKStream kafkaStreamsDsl(final PlanNode planNode, Map propsMap) throws + Exception { + if (planNode instanceof SourceNode) { + return buildSource((SourceNode) planNode, propsMap); + } else if (planNode instanceof JoinNode) { + return buildJoin((JoinNode) planNode, propsMap); + } else if (planNode instanceof AggregateNode) { + AggregateNode aggregateNode = (AggregateNode) planNode; + SchemaKStream aggregateSchemaStream = buildAggregate(aggregateNode, propsMap); + return aggregateSchemaStream; + } else if (planNode instanceof ProjectNode) { + ProjectNode projectNode = (ProjectNode) planNode; + SchemaKStream projectedSchemaStream = buildProject(projectNode, propsMap); + return projectedSchemaStream; + } else if (planNode instanceof FilterNode) { + FilterNode filterNode = (FilterNode) planNode; + SchemaKStream filteredSchemaStream = buildFilter(filterNode, propsMap); + return filteredSchemaStream; + } else if (planNode instanceof OutputNode) { + OutputNode outputNode = (OutputNode) planNode; + SchemaKStream outputSchemaStream = buildOutput(outputNode, propsMap); + return outputSchemaStream; + } + throw new KsqlException( + "Unsupported logical plan node: " + planNode.getId() + " , Type: " + planNode.getClass() + .getName()); + } + + private SchemaKStream buildOutput(final OutputNode outputNode, Map propsMap) + throws Exception { + SchemaKStream schemaKStream = kafkaStreamsDsl(outputNode.getSource()); + Set rowkeyIndexes = SchemaUtil.getRowTimeRowKeyIndexes(outputNode.getSchema()); + if (outputNode instanceof KsqlStructuredDataOutputNode) { + KsqlStructuredDataOutputNode ksqlStructuredDataOutputNode = (KsqlStructuredDataOutputNode) + outputNode; + KsqlStructuredDataOutputNode ksqlStructuredDataOutputNodeNoRowKey = new + KsqlStructuredDataOutputNode( + ksqlStructuredDataOutputNode.getId(), + ksqlStructuredDataOutputNode.getSource(), + SchemaUtil.removeImplicitRowTimeRowKeyFromSchema( + ksqlStructuredDataOutputNode.getSchema()), + ksqlStructuredDataOutputNode.getTimestampField(), + ksqlStructuredDataOutputNode.getKeyField(), + ksqlStructuredDataOutputNode.getKsqlTopic(), + ksqlStructuredDataOutputNode.getKafkaTopicName(), + ksqlStructuredDataOutputNode.getOutputProperties(), + ksqlStructuredDataOutputNode.getLimit()); + if (ksqlStructuredDataOutputNodeNoRowKey.getKsqlTopic() + .getKsqlTopicSerDe() instanceof KsqlAvroTopicSerDe) { + KsqlAvroTopicSerDe ksqlAvroTopicSerDe = + (KsqlAvroTopicSerDe) ksqlStructuredDataOutputNodeNoRowKey + .getKsqlTopic().getKsqlTopicSerDe(); + ksqlStructuredDataOutputNodeNoRowKey = + addAvroSchemaToResultTopic(ksqlStructuredDataOutputNodeNoRowKey); + } + + Map outputProperties = ksqlStructuredDataOutputNodeNoRowKey + .getOutputProperties(); + if (outputProperties.containsKey(KsqlConfig.SINK_NUMBER_OF_PARTITIONS)) { + ksqlConfig.put(KsqlConfig.SINK_NUMBER_OF_PARTITIONS, + outputProperties.get(KsqlConfig.SINK_NUMBER_OF_PARTITIONS)); + } + if (outputProperties.containsKey(KsqlConfig.SINK_NUMBER_OF_REPLICATIONS)) { + ksqlConfig.put(KsqlConfig.SINK_NUMBER_OF_REPLICATIONS, + outputProperties.get(KsqlConfig.SINK_NUMBER_OF_REPLICATIONS)); + } + SchemaKStream resultSchemaStream = schemaKStream; + if (!(resultSchemaStream instanceof SchemaKTable)) { + 
+        resultSchemaStream = new SchemaKStream(
+            ksqlStructuredDataOutputNode.getSchema(),
+            schemaKStream.getKstream(),
+            ksqlStructuredDataOutputNode.getKeyField(),
+            Arrays.asList(schemaKStream),
+            SchemaKStream.Type.SINK);
+
+        if (outputProperties.containsKey(DdlConfig.PARTITION_BY_PROPERTY)) {
+          String keyFieldName = outputProperties.get(DdlConfig.PARTITION_BY_PROPERTY).toString();
+          Optional<Field> keyField = SchemaUtil.getFieldByName(
+              resultSchemaStream.getSchema(), keyFieldName);
+          if (!keyField.isPresent()) {
+            throw new KsqlException(String.format(
+                "Column %s does not exist in the result schema. Error in PARTITION BY clause.",
+                keyFieldName));
+          }
+          resultSchemaStream = resultSchemaStream.selectKey(keyField.get());
+
+          ksqlStructuredDataOutputNodeNoRowKey = new KsqlStructuredDataOutputNode(
+              ksqlStructuredDataOutputNodeNoRowKey.getId(),
+              ksqlStructuredDataOutputNodeNoRowKey.getSource(),
+              ksqlStructuredDataOutputNodeNoRowKey.getSchema(),
+              ksqlStructuredDataOutputNodeNoRowKey.getTimestampField(),
+              keyField.get(),
+              ksqlStructuredDataOutputNodeNoRowKey.getKsqlTopic(),
+              ksqlStructuredDataOutputNodeNoRowKey.getKafkaTopicName(),
+              ksqlStructuredDataOutputNodeNoRowKey.getOutputProperties(),
+              ksqlStructuredDataOutputNodeNoRowKey.getLimit());
+        }
+      }
+
+      resultSchemaStream = resultSchemaStream.into(
+          ksqlStructuredDataOutputNodeNoRowKey.getKafkaTopicName(),
+          SerDeUtil.getRowSerDe(
+              ksqlStructuredDataOutputNodeNoRowKey.getKsqlTopic().getKsqlTopicSerDe(),
+              ksqlStructuredDataOutputNodeNoRowKey.getSchema()),
+          rowkeyIndexes,
+          ksqlConfig,
+          kafkaTopicClient);
+
+      // Register the sink with the implicit ROWTIME/ROWKEY columns restored in its schema.
+      KsqlStructuredDataOutputNode ksqlStructuredDataOutputNodeWithRowkey =
+          new KsqlStructuredDataOutputNode(
+              ksqlStructuredDataOutputNodeNoRowKey.getId(),
+              ksqlStructuredDataOutputNodeNoRowKey.getSource(),
+              SchemaUtil.addImplicitRowTimeRowKeyToSchema(
+                  ksqlStructuredDataOutputNodeNoRowKey.getSchema()),
+              ksqlStructuredDataOutputNodeNoRowKey.getTimestampField(),
+              ksqlStructuredDataOutputNodeNoRowKey.getKeyField(),
+              ksqlStructuredDataOutputNodeNoRowKey.getKsqlTopic(),
+              ksqlStructuredDataOutputNodeNoRowKey.getKafkaTopicName(),
+              ksqlStructuredDataOutputNodeNoRowKey.getOutputProperties(),
+              ksqlStructuredDataOutputNodeNoRowKey.getLimit());
+      this.planSink = ksqlStructuredDataOutputNodeWithRowkey;
+      return resultSchemaStream;
+    } else if (outputNode instanceof KsqlBareOutputNode) {
+      SchemaKStream resultSchemaStream = schemaKStream.toQueue(outputNode.getLimit());
+      this.planSink = (KsqlBareOutputNode) outputNode;
+      return resultSchemaStream;
+    }
+    throw new KsqlException("Unsupported output logical node: " + outputNode.getClass().getName());
+  }
+
+  private SchemaKStream buildAggregate(final AggregateNode aggregateNode,
+                                       final Map<String, Object> propsMap) throws Exception {
+
+    StructuredDataSourceNode streamSourceNode = aggregateNode.getTheSourceNode();
+    SchemaKStream sourceSchemaKStream = kafkaStreamsDsl(aggregateNode.getSource());
+
+    // Re-partition so that rows with equal GROUP BY values land on the same key.
+    SchemaKStream rekeyedSchemaKStream = aggregateReKey(aggregateNode, sourceSchemaKStream);
+
+    // Pre-aggregate projection: every required column and aggregate argument is
+    // computed once, de-duplicated by its string form.
+    List<Pair<String, Expression>> aggArgExpansionList = new ArrayList<>();
+    Map<String, Integer> expressionNames = new HashMap<>();
+    for (Expression expression : aggregateNode.getRequiredColumnList()) {
+      if (!expressionNames.containsKey(expression.toString())) {
+        expressionNames.put(expression.toString(), aggArgExpansionList.size());
+        aggArgExpansionList.add(new Pair<>(expression.toString(), expression));
+      }
+    }
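+    // Aggregate-call arguments get the same de-duplication so each expression is computed once.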
+    for (Expression expression : aggregateNode.getAggregateFunctionArguments()) {
+      if (!expressionNames.containsKey(expression.toString())) {
+        expressionNames.put(expression.toString(), aggArgExpansionList.size());
+        aggArgExpansionList.add(new Pair<>(expression.toString(), expression));
+      }
+    }
+
+    SchemaKStream aggregateArgExpanded = rekeyedSchemaKStream.select(aggArgExpansionList);
+
+    Serde<GenericRow> genericRowSerde = SerDeUtil.getRowSerDe(
+        streamSourceNode.getStructuredDataSource().getKsqlTopic().getKsqlTopicSerDe(),
+        aggregateArgExpanded.getSchema());
+    SchemaKGroupedStream schemaKGroupedStream =
+        aggregateArgExpanded.groupByKey(Serdes.String(), genericRowSerde);
+
+    // Aggregate computations
+    Map<Integer, KsqlAggregateFunction> aggValToAggFunctionMap = new HashMap<>();
+    Map<Integer, Integer> aggValToValColumnMap = new HashMap<>();
+    SchemaBuilder aggregateSchema = SchemaBuilder.struct();
+
+    List<Object> resultColumns = new ArrayList<>();
+    int nonAggColumnIndex = 0;
+    for (Expression expression : aggregateNode.getRequiredColumnList()) {
+      String exprStr = expression.toString();
+      int index = getIndexInSchema(exprStr, aggregateArgExpanded.getSchema());
+      aggValToValColumnMap.put(nonAggColumnIndex, index);
+      nonAggColumnIndex++;
+      resultColumns.add("");
+      Field field = aggregateArgExpanded.getSchema().fields().get(index);
+      aggregateSchema.field(field.name(), field.schema());
+    }
+    int udafIndexInAggSchema = resultColumns.size();
+    for (FunctionCall functionCall : aggregateNode.getFunctionList()) {
+      KsqlAggregateFunction aggregateFunctionInfo = KsqlFunctions.getAggregateFunction(
+          functionCall.getName().toString(),
+          functionCall.getArguments(),
+          aggregateArgExpanded.getSchema());
+      int udafIndex = expressionNames.get(functionCall.getArguments().get(0).toString());
+      // Each query gets its own function instance, bound to its argument's column index.
+      KsqlAggregateFunction aggregateFunction = aggregateFunctionInfo.getClass()
+          .getDeclaredConstructor(Integer.class).newInstance(udafIndex);
+      aggValToAggFunctionMap.put(udafIndexInAggSchema, aggregateFunction);
+      resultColumns.add(aggregateFunction.getIntialValue());
+
+      udafIndexInAggSchema++;
+      aggregateSchema.field("AGG_COL_" + udafIndexInAggSchema,
+                            aggregateFunction.getReturnType());
+    }
+
+    Serde<GenericRow> aggValueGenericRowSerde = SerDeUtil.getRowSerDe(
+        streamSourceNode.getStructuredDataSource().getKsqlTopic().getKsqlTopicSerDe(),
+        aggregateSchema);
+
+    SchemaKTable schemaKTable = schemaKGroupedStream.aggregate(
+        new KudafInitializer(resultColumns),
+        new KudafAggregator(aggValToAggFunctionMap, aggValToValColumnMap),
+        aggregateNode.getWindowExpression(),
+        aggValueGenericRowSerde,
+        "KSQL_Agg_Query_" + System.currentTimeMillis());
+
+    // Post-aggregate schema: non-aggregate columns first, then one AGG_COL_<i> per UDAF.
+    SchemaBuilder schemaBuilder = SchemaBuilder.struct();
+    List<Field> fields = schemaKTable.getSchema().fields();
+    for (int i = 0; i < aggregateNode.getRequiredColumnList().size(); i++) {
+      schemaBuilder.field(fields.get(i).name(), fields.get(i).schema());
+    }
+    for (int aggFunctionVarSuffix = 0;
+         aggFunctionVarSuffix < aggregateNode.getFunctionList().size(); aggFunctionVarSuffix++) {
+      String udafName = aggregateNode.getFunctionList().get(aggFunctionVarSuffix).getName()
+          .getSuffix();
+      KsqlAggregateFunction aggregateFunction = KsqlFunctions.getAggregateFunction(
+          udafName,
+          aggregateNode.getFunctionList().get(aggFunctionVarSuffix).getArguments(),
+          schemaKTable.getSchema());
+      Schema fieldSchema = aggregateFunction.getReturnType();
+      schemaBuilder.field(AggregateExpressionRewriter.AGGREGATE_FUNCTION_VARIABLE_PREFIX
+                          + aggFunctionVarSuffix, fieldSchema);
+    }
+
+    Schema aggStageSchema = schemaBuilder.build();
+
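+    // Wrap the aggregation result with the post-aggregate schema; HAVING becomes a filter
+    // over it and the final SELECT a projection.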
+    SchemaKTable finalSchemaKTable = new SchemaKTable(
+        aggStageSchema,
+        schemaKTable.getKtable(),
+        schemaKTable.getKeyField(),
+        schemaKTable.getSourceSchemaKStreams(),
+        schemaKTable.isWindowed(),
+        SchemaKStream.Type.AGGREGATE);
+
+    if (aggregateNode.getHavingExpressions() != null) {
+      finalSchemaKTable = finalSchemaKTable.filter(aggregateNode.getHavingExpressions());
+    }
+
+    return finalSchemaKTable.select(aggregateNode.getFinalSelectExpressions());
+  }
+
+  private SchemaKStream buildProject(final ProjectNode projectNode,
+                                     final Map<String, Object> propsMap) throws Exception {
+    return kafkaStreamsDsl(projectNode.getSource())
+        .select(projectNode.getProjectNameExpressionPairList());
+  }
+
+  private SchemaKStream buildFilter(final FilterNode filterNode,
+                                    final Map<String, Object> propsMap) throws Exception {
+    return kafkaStreamsDsl(filterNode.getSource()).filter(filterNode.getPredicate());
+  }
+
+  private SchemaKStream buildSource(final SourceNode sourceNode, final Map<String, Object> props) {
+
+    // An upstream join may ask this source to start from a specific offset.
+    TopologyBuilder.AutoOffsetReset autoOffsetReset = null;
+    if (props.containsKey(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)) {
+      if (props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toString()
+          .equalsIgnoreCase("EARLIEST")) {
+        autoOffsetReset = TopologyBuilder.AutoOffsetReset.EARLIEST;
+      } else if (props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toString()
+          .equalsIgnoreCase("LATEST")) {
+        autoOffsetReset = TopologyBuilder.AutoOffsetReset.LATEST;
+      }
+    }
+
+    if (sourceNode instanceof StructuredDataSourceNode) {
+      StructuredDataSourceNode structuredDataSourceNode = (StructuredDataSourceNode) sourceNode;
+
+      if (structuredDataSourceNode.getTimestampField() != null) {
+        int timestampColumnIndex = getTimeStampColumnIndex(
+            structuredDataSourceNode.getSchema(),
+            structuredDataSourceNode.getTimestampField());
+        ksqlConfig.put(KsqlConfig.KSQL_TIMESTAMP_COLUMN_INDEX, timestampColumnIndex);
+      }
+
+      Serde<GenericRow> genericRowSerde = SerDeUtil.getRowSerDe(
+          structuredDataSourceNode.getStructuredDataSource().getKsqlTopic().getKsqlTopicSerDe(),
+          SchemaUtil.removeImplicitRowTimeRowKeyFromSchema(
+              structuredDataSourceNode.getSchema()));
+
+      Serde<GenericRow> genericRowSerdeAfterRead = SerDeUtil.getRowSerDe(
+          structuredDataSourceNode.getStructuredDataSource().getKsqlTopic().getKsqlTopicSerDe(),
+          structuredDataSourceNode.getSchema());
+
+      if (structuredDataSourceNode.getDataSourceType()
+          == StructuredDataSource.DataSourceType.KTABLE) {
+
+        KsqlTable ksqlTable = (KsqlTable) structuredDataSourceNode.getStructuredDataSource();
+        KTable ktable;
+        if (ksqlTable.isWindowed()) {
+          // Windowed table: keys are Windowed<String>; prepend "<key> : Window{...}" to each row.
+          KStream<Windowed<String>, GenericRow> kstream = builder
+              .stream(autoOffsetReset, new WindowedSerde(), genericRowSerde,
+                      ksqlTable.getKsqlTopic().getKafkaTopicName())
+              .map(new KeyValueMapper<Windowed<String>, GenericRow,
+                       KeyValue<Windowed<String>, GenericRow>>() {
+                @Override
+                public KeyValue<Windowed<String>, GenericRow> apply(
+                    Windowed<String> key, GenericRow row) {
+                  if (row != null) {
+                    row.getColumns().add(0,
+                        String.format("%s : Window{start=%d end=-}",
+                                      key.key(), key.window().start()));
+                  }
+                  return new KeyValue<>(key, row);
+                }
+              });
+          kstream = addTimestampColumn(kstream);
+          // Latest value wins: the reducer simply keeps the newest row per key.
+          ktable = kstream
+              .groupByKey(new WindowedSerde(), genericRowSerdeAfterRead)
+              .reduce(new Reducer<GenericRow>() {
+                @Override
+                public GenericRow apply(GenericRow aggValue, GenericRow newValue) {
+                  return newValue;
+                }
+              }, ksqlTable.getStateStoreName());
+        } else {
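+          // Non-windowed table: plain String keys; the key is prepended as the row's first column.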
+          KStream<String, GenericRow> kstream = builder
+              .stream(autoOffsetReset, Serdes.String(), genericRowSerde,
+                      ksqlTable.getKsqlTopic().getKafkaTopicName())
+              .map(new KeyValueMapper<String, GenericRow, KeyValue<String, GenericRow>>() {
+                @Override
+                public KeyValue<String, GenericRow> apply(String key, GenericRow row) {
+                  if (row != null) {
+                    row.getColumns().add(0, key);
+                  }
+                  return new KeyValue<>(key, row);
+                }
+              });
+          kstream = addTimestampColumn(kstream);
+          ktable = kstream.groupByKey(Serdes.String(), genericRowSerdeAfterRead)
+              .reduce(new Reducer<GenericRow>() {
+                @Override
+                public GenericRow apply(GenericRow aggValue, GenericRow newValue) {
+                  return newValue;
+                }
+              }, ksqlTable.getStateStoreName());
+        }
+
+        return new SchemaKTable(sourceNode.getSchema(), ktable,
+                                sourceNode.getKeyField(), new ArrayList<>(),
+                                ksqlTable.isWindowed(),
+                                SchemaKStream.Type.SOURCE);
+      }
+
+      KsqlStream ksqlStream = (KsqlStream) structuredDataSourceNode.getStructuredDataSource();
+      KStream<String, GenericRow> kstream = builder
+          .stream(Serdes.String(), genericRowSerde,
+                  ksqlStream.getKsqlTopic().getKafkaTopicName())
+          .map(new KeyValueMapper<String, GenericRow, KeyValue<String, GenericRow>>() {
+            @Override
+            public KeyValue<String, GenericRow> apply(String key, GenericRow row) {
+              if (row != null) {
+                row.getColumns().add(0, key);
+              }
+              return new KeyValue<>(key, row);
+            }
+          });
+      kstream = addTimestampColumn(kstream);
+      return new SchemaKStream(sourceNode.getSchema(), kstream,
+                               sourceNode.getKeyField(), new ArrayList<>(),
+                               SchemaKStream.Type.SOURCE);
+    }
+    throw new KsqlException("Unsupported source logical node: " + sourceNode.getClass().getName());
+  }
+
+  private SchemaKStream buildJoin(final JoinNode joinNode, final Map<String, Object> propsMap)
+      throws Exception {
+    SchemaKStream leftSchemaKStream = kafkaStreamsDsl(joinNode.getLeft());
+
+    // The table side of the join must be read from the beginning of its topic.
+    propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
+                 TopologyBuilder.AutoOffsetReset.EARLIEST.toString());
+    SchemaKStream rightSchemaKStream = kafkaStreamsDsl(joinNode.getRight(), propsMap);
+    if (rightSchemaKStream instanceof SchemaKTable) {
+      SchemaKTable rightSchemaKTable = (SchemaKTable) rightSchemaKStream;
+
+      if (leftSchemaKStream.getKeyField() == null
+          || !leftSchemaKStream.getKeyField().name().equals(joinNode.getLeftKeyFieldName())) {
+        leftSchemaKStream = leftSchemaKStream.selectKey(
+            SchemaUtil.getFieldByName(leftSchemaKStream.getSchema(),
+                                      joinNode.getLeftKeyFieldName()).get());
+      }
+      SchemaKStream joinSchemaKStream;
+      switch (joinNode.getType()) {
+        case LEFT:
+          KsqlTopicSerDe joinSerDe = getResultTopicSerde(joinNode);
+          String joinKeyFieldName = joinNode.getLeftAlias() + "."
+                                    + leftSchemaKStream.getKeyField().name();
+          joinSchemaKStream = leftSchemaKStream.leftJoin(
+              rightSchemaKTable, joinNode.getSchema(),
+              joinNode.getSchema().field(joinKeyFieldName), joinSerDe);
+          break;
+        default:
+          throw new KsqlException("Join type is not supported yet: " + joinNode.getType());
+      }
+      return joinSchemaKStream;
+    }
+
+    throw new KsqlException("Unsupported join logical node: Left: "
+                            + joinNode.getLeft() + ", Right: " + joinNode.getRight());
+  }
+
+  // The result topic's serde follows the leftmost source's topic serde.
+  private KsqlTopicSerDe getResultTopicSerde(final PlanNode node) {
+    if (node instanceof StructuredDataSourceNode) {
+      StructuredDataSourceNode structuredDataSourceNode = (StructuredDataSourceNode) node;
+      return structuredDataSourceNode.getStructuredDataSource().getKsqlTopic().getKsqlTopicSerDe();
+    } else if (node instanceof JoinNode) {
+      return getResultTopicSerde(((JoinNode) node).getLeft());
+    } else {
+      return getResultTopicSerde(node.getSources().get(0));
+    }
+  }
+
+  public KStreamBuilder getBuilder() {
+    return builder;
+  }
+
+  public OutputNode getPlanSink() {
+    return planSink;
+  }
+
+  private KsqlStructuredDataOutputNode addAvroSchemaToResultTopic(
+      final KsqlStructuredDataOutputNode ksqlStructuredDataOutputNode) {
+    MetastoreUtil metastoreUtil = new MetastoreUtil();
+    String avroSchema = metastoreUtil.buildAvroSchema(
+        ksqlStructuredDataOutputNode.getSchema(),
+        ksqlStructuredDataOutputNode.getKsqlTopic().getName());
+    KsqlAvroTopicSerDe ksqlAvroTopicSerDe = new KsqlAvroTopicSerDe(avroSchema);
+    KsqlTopic newKsqlTopic = new KsqlTopic(
+        ksqlStructuredDataOutputNode.getKsqlTopic().getName(),
+        ksqlStructuredDataOutputNode.getKsqlTopic().getKafkaTopicName(),
+        ksqlAvroTopicSerDe);
+
+    return new KsqlStructuredDataOutputNode(
+        ksqlStructuredDataOutputNode.getId(), ksqlStructuredDataOutputNode.getSource(),
+        ksqlStructuredDataOutputNode.getSchema(), ksqlStructuredDataOutputNode.getTimestampField(),
+        ksqlStructuredDataOutputNode.getKeyField(), newKsqlTopic,
+        ksqlStructuredDataOutputNode.getKafkaTopicName(),
+        ksqlStructuredDataOutputNode.getOutputProperties(),
+        ksqlStructuredDataOutputNode.getLimit());
+  }
+
+  // Re-key the stream on the concatenated GROUP BY values ("a|+|b|+|...") before grouping.
+  private SchemaKStream aggregateReKey(final AggregateNode aggregateNode,
+                                       final SchemaKStream sourceSchemaKStream) {
+    StringBuilder aggregateKeyName = new StringBuilder();
+    List<Integer> newKeyIndexes = new ArrayList<>();
+    boolean addSeparator = false;
+    for (Expression groupByExpr : aggregateNode.getGroupByExpressions()) {
+      if (addSeparator) {
+        aggregateKeyName.append("|+|");
+      } else {
+        addSeparator = true;
+      }
+      aggregateKeyName.append(groupByExpr.toString());
+      newKeyIndexes.add(getIndexInSchema(groupByExpr.toString(), sourceSchemaKStream.getSchema()));
+    }
+
+    KStream rekeyedKStream = sourceSchemaKStream.getKstream().selectKey(
+        new KeyValueMapper<String, GenericRow, String>() {
+          @Override
+          public String apply(String key, GenericRow value) {
+            StringBuilder newKey = new StringBuilder();
+            boolean addSeparator = false;
+            for (int index : newKeyIndexes) {
+              if (addSeparator) {
+                newKey.append("|+|");
+              } else {
+                addSeparator = true;
+              }
+              newKey.append(String.valueOf(value.getColumns().get(index)));
+            }
+            return newKey.toString();
+          }
+        });
+
+    // The synthetic key field is not a column of the value schema, hence index -1.
+    Field newKeyField = new Field(aggregateKeyName.toString(), -1, Schema.STRING_SCHEMA);
+
+    return new SchemaKStream(sourceSchemaKStream.getSchema(), rekeyedKStream, newKeyField,
+                             Arrays.asList(sourceSchemaKStream), SchemaKStream.Type.REKEY);
+  }
+
+  private int getIndexInSchema(final String fieldName, final Schema schema) {
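+    // Linear scan over the schema's fields; -1 signals "no such column" to callers.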
+    List<Field> fields = schema.fields();
+    for (int i = 0; i < fields.size(); i++) {
+      if (fields.get(i).name().equals(fieldName)) {
+        return i;
+      }
+    }
+    return -1;
+  }
+
+  private <K> KStream<K, GenericRow> addTimestampColumn(final KStream<K, GenericRow> kstream) {
+    return kstream.transformValues(new ValueTransformerSupplier<GenericRow, GenericRow>() {
+      @Override
+      public ValueTransformer<GenericRow, GenericRow> get() {
+        return new ValueTransformer<GenericRow, GenericRow>() {
+          ProcessorContext processorContext;
+
+          @Override
+          public void init(ProcessorContext processorContext) {
+            this.processorContext = processorContext;
+          }
+
+          @Override
+          public GenericRow transform(GenericRow row) {
+            if (row != null) {
+              // Prepend the record's timestamp as the implicit ROWTIME column.
+              row.getColumns().add(0, processorContext.timestamp());
+            }
+            return row;
+          }
+
+          @Override
+          public GenericRow punctuate(long l) {
+            return null;
+          }
+
+          @Override
+          public void close() {
+          }
+        };
+      }
+    });
+  }
+
+  // Implicit ROWTIME and ROWKEY occupy the first two slots, hence the "i - 2" offsets;
+  // field names may or may not be alias-qualified ("alias.field"), so both forms are checked.
+  private int getTimeStampColumnIndex(final Schema schema, final Field timestampField) {
+    String timestampFieldName = timestampField.name();
+    if (timestampFieldName.contains(".")) {
+      for (int i = 2; i < schema.fields().size(); i++) {
+        Field field = schema.fields().get(i);
+        if (field.name().contains(".")) {
+          if (timestampFieldName.equals(field.name())) {
+            return i - 2;
+          }
+        } else {
+          if (timestampFieldName
+              .substring(timestampFieldName.indexOf(".") + 1).equals(field.name())) {
+            return i - 2;
+          }
+        }
+      }
+    } else {
+      for (int i = 2; i < schema.fields().size(); i++) {
+        Field field = schema.fields().get(i);
+        if (field.name().contains(".")) {
+          if (timestampFieldName.equals(field.name().substring(field.name().indexOf(".") + 1))) {
+            return i - 2;
+          }
+        } else {
+          if (timestampFieldName.equals(field.name())) {
+            return i - 2;
+          }
+        }
+      }
+    }
+    return -1;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/DefaultTraversalVisitor.java b/ksql-core/src/main/java/io/confluent/ksql/planner/DefaultTraversalVisitor.java
new file mode 100644
index 000000000000..1ddb2e606ddd
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/DefaultTraversalVisitor.java
@@ -0,0 +1,432 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner;
+
+import io.confluent.ksql.parser.tree.AliasedRelation;
+import io.confluent.ksql.parser.tree.ArithmeticBinaryExpression;
+import io.confluent.ksql.parser.tree.ArithmeticUnaryExpression;
+import io.confluent.ksql.parser.tree.AstVisitor;
+import io.confluent.ksql.parser.tree.BetweenPredicate;
+import io.confluent.ksql.parser.tree.Cast;
+import io.confluent.ksql.parser.tree.ComparisonExpression;
+import io.confluent.ksql.parser.tree.CreateTableAsSelect;
+import io.confluent.ksql.parser.tree.Delete;
+import io.confluent.ksql.parser.tree.DereferenceExpression;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.Extract;
+import io.confluent.ksql.parser.tree.FrameBound;
+import io.confluent.ksql.parser.tree.FunctionCall;
+import io.confluent.ksql.parser.tree.GroupBy;
+import io.confluent.ksql.parser.tree.GroupingElement;
+import io.confluent.ksql.parser.tree.InListExpression;
+import io.confluent.ksql.parser.tree.InPredicate;
+import io.confluent.ksql.parser.tree.IsNotNullPredicate;
+import io.confluent.ksql.parser.tree.IsNullPredicate;
+import io.confluent.ksql.parser.tree.Join;
+import io.confluent.ksql.parser.tree.JoinOn;
+import io.confluent.ksql.parser.tree.LikePredicate;
+import io.confluent.ksql.parser.tree.LogicalBinaryExpression;
+import io.confluent.ksql.parser.tree.NotExpression;
+import io.confluent.ksql.parser.tree.NullIfExpression;
+import io.confluent.ksql.parser.tree.Query;
+import io.confluent.ksql.parser.tree.QuerySpecification;
+import io.confluent.ksql.parser.tree.Relation;
+import io.confluent.ksql.parser.tree.Row;
+import io.confluent.ksql.parser.tree.SampledRelation;
+import io.confluent.ksql.parser.tree.SearchedCaseExpression;
+import io.confluent.ksql.parser.tree.Select;
+import io.confluent.ksql.parser.tree.SelectItem;
+import io.confluent.ksql.parser.tree.SetOperation;
+import io.confluent.ksql.parser.tree.SimpleCaseExpression;
+import io.confluent.ksql.parser.tree.SimpleGroupBy;
+import io.confluent.ksql.parser.tree.SingleColumn;
+import io.confluent.ksql.parser.tree.SortItem;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.parser.tree.Statements;
+import io.confluent.ksql.parser.tree.SubqueryExpression;
+import io.confluent.ksql.parser.tree.SubscriptExpression;
+import io.confluent.ksql.parser.tree.TableSubquery;
+import io.confluent.ksql.parser.tree.Values;
+import io.confluent.ksql.parser.tree.WhenClause;
+import io.confluent.ksql.parser.tree.Window;
+import io.confluent.ksql.parser.tree.WindowFrame;
+import io.confluent.ksql.parser.tree.With;
+import io.confluent.ksql.parser.tree.WithQuery;
+
+import java.util.Set;
+
+public abstract class DefaultTraversalVisitor<R, C>
+    extends AstVisitor<R, C> {
+
+  @Override
+  protected R visitStatements(Statements node, C context) {
+    for (Statement statement : node.statementList) {
+      process(statement, context);
+    }
+    return visitNode(node, context);
+  }
+
+  @Override
+  protected R visitExtract(Extract node, C context) {
+    return process(node.getExpression(), context);
+  }
+
+  @Override
+  protected R visitCast(Cast node, C context) {
+    return process(node.getExpression(), context);
+  }
+
+  @Override
+  protected R visitArithmeticBinary(ArithmeticBinaryExpression node, C context) {
+    process(node.getLeft(), context);
+    process(node.getRight(), context);
+
+    return null;
+  }
+
+  @Override
+  protected R visitBetweenPredicate(BetweenPredicate node, C context) {
+    process(node.getValue(), context);
+    process(node.getMin(), context);
process(node.getMax(), context); + + return null; + } + + @Override + protected R visitSubscriptExpression(SubscriptExpression node, C context) { + process(node.getBase(), context); + process(node.getIndex(), context); + + return null; + } + + @Override + protected R visitComparisonExpression(ComparisonExpression node, C context) { + process(node.getLeft(), context); + process(node.getRight(), context); + + return null; + } + + @Override + protected R visitQuery(Query node, C context) { + if (node.getWith().isPresent()) { + process(node.getWith().get(), context); + } + process(node.getQueryBody(), context); + for (SortItem sortItem : node.getOrderBy()) { + process(sortItem, context); + } + + return null; + } + + @Override + protected R visitWith(With node, C context) { + for (WithQuery query : node.getQueries()) { + process(query, context); + } + + return null; + } + + @Override + protected R visitWithQuery(WithQuery node, C context) { + return process(node.getQuery(), context); + } + + @Override + protected R visitSelect(Select node, C context) { + for (SelectItem item : node.getSelectItems()) { + process(item, context); + } + + return null; + } + + @Override + protected R visitSingleColumn(SingleColumn node, C context) { + process(node.getExpression(), context); + + return null; + } + + @Override + protected R visitWhenClause(WhenClause node, C context) { + process(node.getOperand(), context); + process(node.getResult(), context); + + return null; + } + + @Override + protected R visitInPredicate(InPredicate node, C context) { + process(node.getValue(), context); + process(node.getValueList(), context); + + return null; + } + + @Override + protected R visitFunctionCall(FunctionCall node, C context) { + for (Expression argument : node.getArguments()) { + process(argument, context); + } + + if (node.getWindow().isPresent()) { + process(node.getWindow().get(), context); + } + + return null; + } + + @Override + protected R visitDereferenceExpression(DereferenceExpression node, C context) { + process(node.getBase(), context); + return null; + } + + @Override + public R visitWindow(Window node, C context) { + process(node.getWindowExpression(), context); + return null; + } + + @Override + public R visitWindowFrame(WindowFrame node, C context) { + process(node.getStart(), context); + if (node.getEnd().isPresent()) { + process(node.getEnd().get(), context); + } + + return null; + } + + @Override + public R visitFrameBound(FrameBound node, C context) { + if (node.getValue().isPresent()) { + process(node.getValue().get(), context); + } + + return null; + } + + @Override + protected R visitSimpleCaseExpression(SimpleCaseExpression node, C context) { + process(node.getOperand(), context); + for (WhenClause clause : node.getWhenClauses()) { + process(clause, context); + } + + node.getDefaultValue() + .ifPresent(value -> process(value, context)); + + return null; + } + + @Override + protected R visitInListExpression(InListExpression node, C context) { + for (Expression value : node.getValues()) { + process(value, context); + } + + return null; + } + + @Override + protected R visitNullIfExpression(NullIfExpression node, C context) { + process(node.getFirst(), context); + process(node.getSecond(), context); + + return null; + } + + @Override + protected R visitArithmeticUnary(ArithmeticUnaryExpression node, C context) { + return process(node.getValue(), context); + } + + @Override + protected R visitNotExpression(NotExpression node, C context) { + return process(node.getValue(), context); + } + + 
@Override + protected R visitSearchedCaseExpression(SearchedCaseExpression node, C context) { + for (WhenClause clause : node.getWhenClauses()) { + process(clause, context); + } + node.getDefaultValue() + .ifPresent(value -> process(value, context)); + + return null; + } + + @Override + protected R visitLikePredicate(LikePredicate node, C context) { + process(node.getValue(), context); + process(node.getPattern(), context); + if (node.getEscape() != null) { + process(node.getEscape(), context); + } + + return null; + } + + @Override + protected R visitIsNotNullPredicate(IsNotNullPredicate node, C context) { + return process(node.getValue(), context); + } + + @Override + protected R visitIsNullPredicate(IsNullPredicate node, C context) { + return process(node.getValue(), context); + } + + @Override + protected R visitLogicalBinaryExpression(LogicalBinaryExpression node, C context) { + process(node.getLeft(), context); + process(node.getRight(), context); + + return null; + } + + @Override + protected R visitSubqueryExpression(SubqueryExpression node, C context) { + return process(node.getQuery(), context); + } + + @Override + protected R visitSortItem(SortItem node, C context) { + return process(node.getSortKey(), context); + } + + @Override + protected R visitQuerySpecification(QuerySpecification node, C context) { + process(node.getSelect(), context); + if (node.getFrom().isPresent()) { + process(node.getFrom().get(), context); + } + if (node.getWhere().isPresent()) { + process(node.getWhere().get(), context); + } + if (node.getGroupBy().isPresent()) { + process(node.getGroupBy().get(), context); + } + if (node.getHaving().isPresent()) { + process(node.getHaving().get(), context); + } + for (SortItem sortItem : node.getOrderBy()) { + process(sortItem, context); + } + return null; + } + + @Override + protected R visitSetOperation(SetOperation node, C context) { + for (Relation relation : node.getRelations()) { + process(relation, context); + } + return null; + } + + @Override + protected R visitValues(Values node, C context) { + for (Expression row : node.getRows()) { + process(row, context); + } + return null; + } + + @Override + protected R visitRow(Row node, C context) { + for (Expression expression : node.getItems()) { + process(expression, context); + } + return null; + } + + @Override + protected R visitTableSubquery(TableSubquery node, C context) { + return process(node.getQuery(), context); + } + + @Override + protected R visitAliasedRelation(AliasedRelation node, C context) { + return process(node.getRelation(), context); + } + + @Override + protected R visitSampledRelation(SampledRelation node, C context) { + process(node.getRelation(), context); + process(node.getSamplePercentage(), context); + if (node.getColumnsToStratifyOn().isPresent()) { + for (Expression expression : node.getColumnsToStratifyOn().get()) { + process(expression, context); + } + } + return null; + } + + @Override + protected R visitJoin(Join node, C context) { + process(node.getLeft(), context); + process(node.getRight(), context); + + node.getCriteria() + .filter(criteria -> criteria instanceof JoinOn) + .map(criteria -> process(((JoinOn) criteria).getExpression(), context)); + + return null; + } + + @Override + protected R visitGroupBy(GroupBy node, C context) { + for (GroupingElement groupingElement : node.getGroupingElements()) { + process(groupingElement, context); + } + + return null; + } + + @Override + protected R visitGroupingElement(GroupingElement node, C context) { + for (Set expressions : 
node.enumerateGroupingSets()) { + for (Expression expression : expressions) { + process(expression, context); + } + } + return null; + } + + @Override + protected R visitSimpleGroupBy(SimpleGroupBy node, C context) { + visitGroupingElement(node, context); + + for (Expression expression : node.getColumnExpressions()) { + process(expression, context); + } + + return null; + } + + @Override + protected R visitDelete(Delete node, C context) { + process(node.getTable(), context); + node.getWhere().ifPresent(where -> process(where, context)); + + return null; + } + + @Override + protected R visitCreateTableAsSelect(CreateTableAsSelect node, C context) { + process(node.getQuery(), context); + node.getProperties().values().forEach(expression -> process(expression, context)); + + return null; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java b/ksql-core/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java new file mode 100644 index 000000000000..285899feaf0a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java @@ -0,0 +1,179 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.planner; + +import io.confluent.ksql.analyzer.AggregateAnalysis; +import io.confluent.ksql.analyzer.Analysis; +import io.confluent.ksql.metastore.KsqlStdOut; +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.KsqlTable; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.planner.plan.AggregateNode; +import io.confluent.ksql.planner.plan.FilterNode; +import io.confluent.ksql.planner.plan.KsqlBareOutputNode; +import io.confluent.ksql.planner.plan.KsqlStructuredDataOutputNode; +import io.confluent.ksql.planner.plan.OutputNode; +import io.confluent.ksql.planner.plan.PlanNode; +import io.confluent.ksql.planner.plan.PlanNodeId; +import io.confluent.ksql.planner.plan.ProjectNode; +import io.confluent.ksql.planner.plan.SourceNode; +import io.confluent.ksql.planner.plan.StructuredDataSourceNode; +import io.confluent.ksql.util.ExpressionTypeManager; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.SchemaUtil; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; + +import java.util.ArrayList; +import java.util.List; + +public class LogicalPlanner { + + private Analysis analysis; + private AggregateAnalysis aggregateAnalysis; + + public LogicalPlanner(Analysis analysis, AggregateAnalysis aggregateAnalysis) { + this.analysis = analysis; + this.aggregateAnalysis = aggregateAnalysis; + } + + public PlanNode buildPlan() { + PlanNode currentNode; + if (analysis.getJoin() != null) { + currentNode = analysis.getJoin(); + } else { + SourceNode sourceNode = buildSourceNode(); + currentNode = sourceNode; + } + if (analysis.getWhereExpression() != null) { + FilterNode filterNode = buildFilterNode(currentNode.getSchema(), currentNode); + currentNode = filterNode; + } + if ((analysis.getGroupByExpressions() != null) && (!analysis.getGroupByExpressions() + .isEmpty())) { + AggregateNode aggregateNode = buildAggregateNode(currentNode.getSchema(), currentNode); + currentNode = aggregateNode; + } else { + ProjectNode projectNode = buildProjectNode(currentNode.getSchema(), currentNode); + currentNode = projectNode; + } + + OutputNode outputNode = buildOutputNode(currentNode.getSchema(), currentNode); + return outputNode; + 
}
+
+  private OutputNode buildOutputNode(final Schema inputSchema, final PlanNode sourcePlanNode) {
+    StructuredDataSource intoDataSource = analysis.getInto();
+
+    if (intoDataSource instanceof KsqlStdOut) {
+      return new KsqlBareOutputNode(new PlanNodeId(KsqlStdOut.KSQL_STDOUT_NAME), sourcePlanNode,
+                                    inputSchema, analysis.getLimitClause());
+    } else if (intoDataSource instanceof StructuredDataSource) {
+      StructuredDataSource intoStructuredDataSource = (StructuredDataSource) intoDataSource;
+
+      Field timestampField = null;
+      if (analysis.getIntoProperties().get(KsqlConfig.SINK_TIMESTAMP_COLUMN_NAME) != null) {
+        timestampField = SchemaUtil.getFieldByName(
+            inputSchema,
+            analysis.getIntoProperties().get(KsqlConfig.SINK_TIMESTAMP_COLUMN_NAME)
+                .toString()).get();
+      }
+
+      return new KsqlStructuredDataOutputNode(
+          new PlanNodeId(intoDataSource.getName()),
+          sourcePlanNode,
+          inputSchema,
+          timestampField,
+          sourcePlanNode.getKeyField(),
+          intoStructuredDataSource.getKsqlTopic(),
+          intoStructuredDataSource.getKsqlTopic().getTopicName(),
+          analysis.getIntoProperties(),
+          analysis.getLimitClause());
+    }
+    throw new RuntimeException("INTO clause is not supported in SELECT.");
+  }
+
+  private AggregateNode buildAggregateNode(final Schema inputSchema,
+                                           final PlanNode sourcePlanNode) {
+    SchemaBuilder aggregateSchema = SchemaBuilder.struct();
+    ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(inputSchema);
+    for (int i = 0; i < analysis.getSelectExpressions().size(); i++) {
+      Expression expression = analysis.getSelectExpressions().get(i);
+      String alias = analysis.getSelectExpressionAlias().get(i);
+      Schema expressionType = expressionTypeManager.getExpressionType(expression);
+      aggregateSchema = aggregateSchema.field(alias, expressionType);
+    }
+
+    return new AggregateNode(new PlanNodeId("Aggregate"), sourcePlanNode, aggregateSchema,
+                             analysis.getSelectExpressions(), analysis.getGroupByExpressions(),
+                             analysis.getWindowExpression(),
+                             aggregateAnalysis.getAggregateFunctionArguments(),
+                             aggregateAnalysis.getFunctionList(),
+                             aggregateAnalysis.getRequiredColumnsList(),
+                             aggregateAnalysis.getNonAggResultColumns(),
+                             aggregateAnalysis.getFinalSelectExpressions(),
+                             aggregateAnalysis.getHavingExpression());
+  }
+
+  private ProjectNode buildProjectNode(final Schema inputSchema, final PlanNode sourcePlanNode) {
+    SchemaBuilder projectionSchema = SchemaBuilder.struct();
+    ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(inputSchema);
+    for (int i = 0; i < analysis.getSelectExpressions().size(); i++) {
+      Expression expression = analysis.getSelectExpressions().get(i);
+      String alias = analysis.getSelectExpressionAlias().get(i);
+      Schema expressionType = expressionTypeManager.getExpressionType(expression);
+      projectionSchema = projectionSchema.field(alias, expressionType);
+    }
+
+    return new ProjectNode(new PlanNodeId("Project"), sourcePlanNode, projectionSchema,
+                           analysis.getSelectExpressions());
+  }
+
+  private FilterNode buildFilterNode(final Schema inputSchema, final PlanNode sourcePlanNode) {
+    Expression filterExpression = analysis.getWhereExpression();
+    return new FilterNode(new PlanNodeId("Filter"), sourcePlanNode, filterExpression);
+  }
+
+  private SourceNode buildSourceNode() {
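+    // Only the first FROM source is built here; joins are attached via Analysis.getJoin()
+    // in buildPlan() instead of flowing through this method.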
+    StructuredDataSource fromDataSource = analysis.getFromDataSources().get(0).getLeft();
+    String alias = analysis.getFromDataSources().get(0).getRight();
+    Schema fromSchema = SchemaUtil.buildSchemaWithAlias(fromDataSource.getSchema(), alias);
+
+    if (fromDataSource instanceof KsqlStream) {
+      KsqlStream fromStream = (KsqlStream) fromDataSource;
+      return new StructuredDataSourceNode(new PlanNodeId("KsqlTopic"), fromSchema,
+                                          fromDataSource.getKeyField(),
+                                          fromDataSource.getTimestampField(),
+                                          fromStream.getKsqlTopic().getTopicName(),
+                                          alias, fromStream.getDataSourceType(),
+                                          fromStream);
+    } else if (fromDataSource instanceof KsqlTable) {
+      KsqlTable fromTable = (KsqlTable) fromDataSource;
+      return new StructuredDataSourceNode(new PlanNodeId("KsqlTopic"), fromSchema,
+                                          fromDataSource.getKeyField(),
+                                          fromDataSource.getTimestampField(),
+                                          fromTable.getKsqlTopic().getTopicName(),
+                                          alias, fromTable.getDataSourceType(),
+                                          fromTable);
+    }
+
+    throw new RuntimeException("Data source is not supported yet.");
+  }
+
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/PlanException.java b/ksql-core/src/main/java/io/confluent/ksql/planner/PlanException.java
new file mode 100644
index 000000000000..6c5483fb28fc
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/PlanException.java
@@ -0,0 +1,13 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner;
+
+public class PlanException extends RuntimeException {
+
+  public PlanException(final String message) {
+    super(message);
+  }
+
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java
new file mode 100644
index 000000000000..ba7c76a8b11b
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java
@@ -0,0 +1,142 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableList;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.FunctionCall;
+import io.confluent.ksql.parser.tree.WindowExpression;
+import io.confluent.ksql.util.KsqlException;
+import io.confluent.ksql.util.Pair;
+
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class AggregateNode extends PlanNode {
+
+  private final PlanNode source;
+  private final Schema schema;
+  private final List<Expression> projectExpressions;
+  private final List<Expression> groupByExpressions;
+  private final WindowExpression windowExpression;
+  private final List<Expression> aggregateFunctionArguments;
+
+  private final List<FunctionCall> functionList;
+  private final List<Expression> requiredColumnList;
+
+  private final List<Expression> nonAggResultColumns;
+
+  private final List<Expression> finalSelectExpressions;
+
+  private final Expression havingExpressions;
+
+  @JsonCreator
+  public AggregateNode(@JsonProperty("id") final PlanNodeId id,
+                       @JsonProperty("source") final PlanNode source,
+                       @JsonProperty("schema") final Schema schema,
+                       @JsonProperty("projectExpressions")
+                       final List<Expression> projectExpressions,
+                       @JsonProperty("groupby") final List<Expression> groupByExpressions,
+                       @JsonProperty("window") final WindowExpression windowExpression,
+                       @JsonProperty("aggregateFunctionArguments")
+                       final List<Expression> aggregateFunctionArguments,
+                       @JsonProperty("functionList") final List<FunctionCall> functionList,
+                       @JsonProperty("requiredColumnList")
+                       final List<Expression> requiredColumnList,
+                       @JsonProperty("nonAggResultColumns")
+                       final List<Expression> nonAggResultColumns,
+                       @JsonProperty("finalSelectExpressions")
+                       final List<Expression> finalSelectExpressions,
+                       @JsonProperty("havingExpressions") final Expression havingExpressions) {
+    super(id);
+
+    this.source = source;
+    this.schema = schema;
+    this.projectExpressions = projectExpressions;
+    this.groupByExpressions = groupByExpressions;
+    this.windowExpression = windowExpression;
+    this.aggregateFunctionArguments = aggregateFunctionArguments;
+    this.functionList = functionList;
+    this.requiredColumnList = requiredColumnList;
+    this.nonAggResultColumns = nonAggResultColumns;
+    this.finalSelectExpressions = finalSelectExpressions;
+    this.havingExpressions = havingExpressions;
+  }
+
+  @Override
+  public Schema getSchema() {
+    return this.schema;
+  }
+
+  @Override
+  public Field getKeyField() {
+    return null;
+  }
+
+  @Override
+  public List<PlanNode> getSources() {
+    return ImmutableList.of(source);
+  }
+
+  public PlanNode getSource() {
+    return source;
+  }
+
+  public List<Expression> getGroupByExpressions() {
+    return groupByExpressions;
+  }
+
+  public List<Expression> getProjectExpressions() {
+    return projectExpressions;
+  }
+
+  public WindowExpression getWindowExpression() {
+    return windowExpression;
+  }
+
+  public List<Expression> getAggregateFunctionArguments() {
+    return aggregateFunctionArguments;
+  }
+
+  public List<FunctionCall> getFunctionList() {
+    return functionList;
+  }
+
+  public List<Expression> getRequiredColumnList() {
+    return requiredColumnList;
+  }
+
+  public List<Expression> getNonAggResultColumns() {
+    return nonAggResultColumns;
+  }
+
+  // Pair each output field name with the expression that produces it, in schema order.
+  public List<Pair<String, Expression>> getFinalSelectExpressions() {
+    List<Pair<String, Expression>> finalSelectExpressionList = new ArrayList<>();
+    if (finalSelectExpressions.size() != schema.fields().size()) {
+      throw new KsqlException("Incompatible aggregate schema.");
+    }
+    for (int i = 0; i < finalSelectExpressions.size(); i++) {
+      finalSelectExpressionList.add(new Pair<>(schema.fields().get(i).name(),
+                                               finalSelectExpressions.get(i)));
+    }
+    return finalSelectExpressionList;
+  }
+
+  public Expression getHavingExpressions() {
+    return havingExpressions;
+  }
+
+  @Override
+  public <C, R> R accept(PlanVisitor<C, R> visitor, C context) {
+    return visitor.visitAggregate(this, context);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/FilterNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/FilterNode.java
new file mode 100644
index 000000000000..d9501a129945
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/FilterNode.java
@@ -0,0 +1,67 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableList;
+import io.confluent.ksql.parser.tree.Expression;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import javax.annotation.concurrent.Immutable;
+import java.util.List;
+
+@Immutable
+public class FilterNode extends PlanNode {
+
+  private final PlanNode source;
+  private final Expression predicate;
+  private final Schema schema;
+  private final Field keyField;
+
+  @JsonCreator
+  public FilterNode(@JsonProperty("id") final PlanNodeId id,
+                    @JsonProperty("source") final PlanNode source,
+                    @JsonProperty("predicate") final Expression predicate) {
+    super(id);
+
+    this.source = source;
+    this.schema = source.getSchema();
+    this.predicate = predicate;
+    this.keyField = source.getKeyField();
+  }
+
+  @JsonProperty("predicate")
+  public Expression getPredicate() {
+    return predicate;
+  }
+
+  @Override
+  public Schema getSchema() {
+    return this.schema;
+  }
+
+  @Override
+  public Field getKeyField() {
+    return keyField;
+  }
+
+  @Override
+  public List<PlanNode> getSources() {
+    return ImmutableList.of(source);
+  }
+
+  @JsonProperty("source")
+  public PlanNode getSource() {
+    return source;
+  }
+
+  @Override
+  public <C, R> R accept(PlanVisitor<C, R> visitor, C context) {
+    return visitor.visitFilter(this, context);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java
new file mode 100644
index 000000000000..a2857dec2638
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java
@@ -0,0 +1,115 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+
+import java.util.Arrays;
+import java.util.List;
+
+public class JoinNode extends PlanNode {
+
+  public enum Type {
+    CROSS, INNER, LEFT, RIGHT, FULL, IMPLICIT
+  }
+
+  private final Type type;
+  private final PlanNode left;
+  private final PlanNode right;
+  private final Schema schema;
+  private final String leftKeyFieldName;
+  private final String rightKeyFieldName;
+
+  private final String leftAlias;
+  private final String rightAlias;
+  private final Field keyField;
+
+  public JoinNode(@JsonProperty("id") final PlanNodeId id,
+                  @JsonProperty("type") final Type type,
+                  @JsonProperty("left") final PlanNode left,
+                  @JsonProperty("right") final PlanNode right,
+                  @JsonProperty("leftKeyFieldName") final String leftKeyFieldName,
+                  @JsonProperty("rightKeyFieldName") final String rightKeyFieldName,
+                  @JsonProperty("leftAlias") final String leftAlias,
+                  @JsonProperty("rightAlias") final String rightAlias) {
+
+    // TODO: Type should be derived.
+    super(id);
+    this.type = type;
+    this.left = left;
+    this.right = right;
+    this.leftKeyFieldName = leftKeyFieldName;
+    this.rightKeyFieldName = rightKeyFieldName;
+    this.leftAlias = leftAlias;
+    this.rightAlias = rightAlias;
+    this.schema = buildSchema(left, right);
+    this.keyField = this.schema.field(leftAlias + "." + leftKeyFieldName);
+  }
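+  // The join schema is the concatenation of both sides' fields, each qualified with its alias.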
+  private Schema buildSchema(final PlanNode left, final PlanNode right) {
+    Schema leftSchema = left.getSchema();
+    Schema rightSchema = right.getSchema();
+
+    SchemaBuilder schemaBuilder = SchemaBuilder.struct();
+
+    for (Field field : leftSchema.fields()) {
+      String fieldName = leftAlias + "." + field.name();
+      schemaBuilder.field(fieldName, field.schema());
+    }
+
+    for (Field field : rightSchema.fields()) {
+      String fieldName = rightAlias + "." + field.name();
+      schemaBuilder.field(fieldName, field.schema());
+    }
+    return schemaBuilder.build();
+  }
+
+  @Override
+  public Schema getSchema() {
+    return this.schema;
+  }
+
+  @Override
+  public Field getKeyField() {
+    return this.keyField;
+  }
+
+  @Override
+  public List<PlanNode> getSources() {
+    return Arrays.asList(left, right);
+  }
+
+  public PlanNode getLeft() {
+    return left;
+  }
+
+  public PlanNode getRight() {
+    return right;
+  }
+
+  public String getLeftKeyFieldName() {
+    return leftKeyFieldName;
+  }
+
+  public String getRightKeyFieldName() {
+    return rightKeyFieldName;
+  }
+
+  public String getLeftAlias() {
+    return leftAlias;
+  }
+
+  public String getRightAlias() {
+    return rightAlias;
+  }
+
+  public Type getType() {
+    return type;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/KsqlBareOutputNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/KsqlBareOutputNode.java
new file mode 100644
index 000000000000..077d1b442aaa
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/KsqlBareOutputNode.java
@@ -0,0 +1,34 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.Optional;
+
+public class KsqlBareOutputNode extends OutputNode {
+
+  @JsonCreator
+  public KsqlBareOutputNode(@JsonProperty("id") final PlanNodeId id,
+                            @JsonProperty("source") final PlanNode source,
+                            @JsonProperty("schema") final Schema schema,
+                            @JsonProperty("limit") final Optional<Integer> limit) {
+    super(id, source, schema, limit);
+  }
+
+  // Bare queries stream their rows to the client; there is no backing sink topic or key.
+  public String getKafkaTopicName() {
+    return null;
+  }
+
+  @Override
+  public Field getKeyField() {
+    return null;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/KsqlStructuredDataOutputNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/KsqlStructuredDataOutputNode.java
new file mode 100644
index 000000000000..d14610178db6
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/KsqlStructuredDataOutputNode.java
@@ -0,0 +1,65 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import io.confluent.ksql.metastore.KsqlTopic;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.Map;
+import java.util.Optional;
+
+public class KsqlStructuredDataOutputNode extends OutputNode {
+
+  final String kafkaTopicName;
+  final KsqlTopic ksqlTopic;
+  private final Field keyField;
+  final Field timestampField;
+  final Map<String, Object> outputProperties;
+
+  @JsonCreator
+  public KsqlStructuredDataOutputNode(@JsonProperty("id") final PlanNodeId id,
+                                      @JsonProperty("source") final PlanNode source,
+                                      @JsonProperty("schema") final Schema schema,
+                                      @JsonProperty("timestamp") final Field timestampField,
+                                      @JsonProperty("key") final Field keyField,
+                                      @JsonProperty("ksqlTopic") final KsqlTopic ksqlTopic,
+                                      @JsonProperty("topicName") final String topicName,
+                                      @JsonProperty("outputProperties")
+                                      final Map<String, Object> outputProperties,
+                                      @JsonProperty("limit") final Optional<Integer> limit) {
+    super(id, source, schema, limit);
+    this.kafkaTopicName = topicName;
+    this.keyField = keyField;
+    this.timestampField = timestampField;
+    this.ksqlTopic = ksqlTopic;
+    this.outputProperties = outputProperties;
+  }
+
+  public String getKafkaTopicName() {
+    return kafkaTopicName;
+  }
+
+  @Override
+  public Field getKeyField() {
+    return keyField;
+  }
+
+  public Field getTimestampField() {
+    return timestampField;
+  }
+
+  public KsqlTopic getKsqlTopic() {
+    return ksqlTopic;
+  }
+
+  public Map<String, Object> getOutputProperties() {
+    return outputProperties;
+  }
+
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/OutputNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/OutputNode.java
new file mode 100644
index 000000000000..cb4099929816
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/OutputNode.java
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableList;
+import org.apache.kafka.connect.data.Schema;
+
+import javax.annotation.concurrent.Immutable;
+import java.util.List;
+import java.util.Optional;
+
+import static java.util.Objects.requireNonNull;
+
+@Immutable
+public abstract class OutputNode extends PlanNode {
+
+  private final PlanNode source;
+  private final Schema schema;
+  private final Optional<Integer> limit;
+
+  @JsonCreator
+  protected OutputNode(@JsonProperty("id") final PlanNodeId id,
+                       @JsonProperty("source") final PlanNode source,
+                       @JsonProperty("schema") final Schema schema,
+                       @JsonProperty("limit") final Optional<Integer> limit) {
+    super(id);
+
+    requireNonNull(source, "source is null");
+    requireNonNull(schema, "schema is null");
+
+    this.source = source;
+    this.schema = schema;
+    this.limit = limit;
+  }
+
+  @Override
+  public Schema getSchema() {
+    return this.schema;
+  }
+
+  @Override
+  public List<PlanNode> getSources() {
+    return ImmutableList.of(source);
+  }
+
+  public Optional<Integer> getLimit() {
+    return limit;
+  }
+
+  @JsonProperty
+  public PlanNode getSource() {
+    return source;
+  }
+
+  @Override
+  public <C, R> R accept(PlanVisitor<C, R> visitor, C context) {
+    return visitor.visitOutput(this, context);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanNode.java
new file mode 100644
index 000000000000..f9bdb8e75c5d
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanNode.java
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.List;
+
+import static java.util.Objects.requireNonNull;
+
+public abstract class PlanNode {
+
+  private final PlanNodeId id;
+
+  protected PlanNode(final PlanNodeId id) {
+    requireNonNull(id, "id is null");
+    this.id = id;
+  }
+
+  @JsonProperty("id")
+  public PlanNodeId getId() {
+    return id;
+  }
+
+  public abstract Schema getSchema();
+
+  public abstract Field getKeyField();
+
+  public abstract List<PlanNode> getSources();
+
+  public <C, R> R accept(PlanVisitor<C, R> visitor, C context) {
+    return visitor.visitPlan(this, context);
+  }
+
+  // Walk down the left-most input chain to the underlying structured data source, if any.
+  public StructuredDataSourceNode getTheSourceNode() {
+    if (this instanceof StructuredDataSourceNode) {
+      return (StructuredDataSourceNode) this;
+    } else if (this.getSources() != null && !this.getSources().isEmpty()) {
+      return this.getSources().get(0).getTheSourceNode();
+    }
+    return null;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanNodeId.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanNodeId.java
new file mode 100644
index 000000000000..f3b246602f4d
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanNodeId.java
@@ -0,0 +1,53 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+import javax.annotation.concurrent.Immutable;
+
+import static java.util.Objects.requireNonNull;
+
+@Immutable
+public class PlanNodeId {
+
+  private final String id;
+
+  @JsonCreator
+  public PlanNodeId(final String id) {
+    requireNonNull(id, "id is null");
+    this.id = id;
+  }
+
+  @Override
+  @JsonValue
+  public String toString() {
+    return id;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    PlanNodeId that = (PlanNodeId) o;
+    return id.equals(that.id);
+  }
+
+  @Override
+  public int hashCode() {
+    return id.hashCode();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanVisitor.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanVisitor.java
new file mode 100644
index 000000000000..84c1953a6d00
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/PlanVisitor.java
@@ -0,0 +1,33 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+public class PlanVisitor<C, R> {
+
+  protected R visitPlan(PlanNode node, C context) {
+    return null;
+  }
+
+  public R visitFilter(FilterNode node, C context) {
+    return visitPlan(node, context);
+  }
+
+  public R visitProject(ProjectNode node, C context) {
+    return visitPlan(node, context);
+  }
+
+  public R visitStructuredDataSourceNode(StructuredDataSourceNode node, C context) {
+    return visitPlan(node, context);
+  }
+
+  public R visitAggregate(AggregateNode node, C context) {
+    return visitPlan(node, context);
+  }
+
+  public R visitOutput(OutputNode node, C context) {
+    return visitPlan(node, context);
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/ProjectNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/ProjectNode.java
new file mode 100644
index 000000000000..2a764e14e774
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/ProjectNode.java
@@ -0,0 +1,95 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.planner.plan;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableList;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.util.KsqlException;
+import io.confluent.ksql.util.Pair;
+
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import javax.annotation.concurrent.Immutable;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.util.Objects.requireNonNull;
+
+@Immutable
+public class ProjectNode extends PlanNode {
+
+  private final PlanNode source;
+  private final Schema schema;
+  private final Field keyField;
+  private final List<Expression> projectExpressions;
+
+  // TODO: pass in the "assignments" and the "outputs"
+  // TODO: separately (i.e., get rid of the symbol := symbol idiom)
+  @JsonCreator
+  public ProjectNode(@JsonProperty("id") final PlanNodeId id,
+                     @JsonProperty("source") final PlanNode source,
+                     @JsonProperty("schema") final Schema schema,
+                     @JsonProperty("projectExpressions")
+                     final List<Expression> projectExpressions) {
+    super(id);
+
+    requireNonNull(source, "source is null");
+    requireNonNull(schema, "schema is null");
+    requireNonNull(projectExpressions, "projectExpressions is null");
+
+    this.source = source;
+    this.schema = schema;
+    this.keyField = source.getKeyField();
+    this.projectExpressions = projectExpressions;
+  }
+
+  @Override
+  public List<PlanNode> getSources() {
+    return ImmutableList.of(source);
+  }
+
+  @JsonProperty
+  public PlanNode getSource() {
+    return source;
+  }
+
+  @Override
+  public Schema getSchema() {
+    return schema;
+  }
+
+  @Override
+  public Field getKeyField() {
+    return keyField;
+  }
+
+  public List<Expression> getProjectExpressions() {
+    return projectExpressions;
+  }
+
+  // Pair each output field name with the expression that produces it, position by position.
+  public List<Pair<String, Expression>> getProjectNameExpressionPairList() {
+    if (schema.fields().size() != projectExpressions.size()) {
+      throw new KsqlException("Error in projection. Schema fields and expression list are not "
+                              + "compatible.");
+    }
+    List<Pair<String, Expression>> expressionPairs = new ArrayList<>();
+    for (int i = 0; i < projectExpressions.size(); i++) {
+      expressionPairs.add(new Pair<>(schema.fields().get(i).name(), projectExpressions.get(i)));
+    }
+    return expressionPairs;
+  }
+
+  @Override
+  public <C, R> R accept(PlanVisitor<C, R> visitor, C context) {
+    return visitor.visitProject(this, context);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/SourceNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/SourceNode.java
new file mode 100644
index 000000000000..c5e71ab983c7
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/SourceNode.java
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.planner.plan; + +import com.fasterxml.jackson.annotation.JsonProperty; +import io.confluent.ksql.metastore.StructuredDataSource; +import org.apache.kafka.connect.data.Field; + +import javax.annotation.concurrent.Immutable; + + +@Immutable +public abstract class SourceNode extends PlanNode { + + private final StructuredDataSource.DataSourceType dataSourceType; + private final Field timestampField; + + public SourceNode(@JsonProperty("id") final PlanNodeId id, + @JsonProperty("timestampField") final Field timestampField, + @JsonProperty("dataSourceType") + final StructuredDataSource.DataSourceType dataSourceType) { + super(id); + this.dataSourceType = dataSourceType; + this.timestampField = timestampField; + } + + public StructuredDataSource.DataSourceType getDataSourceType() { + return dataSourceType; + } + + public Field getTimestampField() { + return timestampField; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/planner/plan/StructuredDataSourceNode.java b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/StructuredDataSourceNode.java new file mode 100644 index 000000000000..6759b7909510 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/planner/plan/StructuredDataSourceNode.java @@ -0,0 +1,85 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.planner.plan; + + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import io.confluent.ksql.metastore.StructuredDataSource; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +import javax.annotation.concurrent.Immutable; +import java.util.List; + +import static java.util.Objects.requireNonNull; + +@Immutable +public class StructuredDataSourceNode + extends SourceNode { + + private final Schema schema; + private final String topicName; + private final Field keyField; + private final String alias; + StructuredDataSource structuredDataSource; + + // TODO: pass in the "assignments" and the "outputs" separately + // TODO: (i.e., get rid if the symbol := symbol idiom) + @JsonCreator + public StructuredDataSourceNode(@JsonProperty("id") final PlanNodeId id, + @JsonProperty("schema") final Schema schema, + @JsonProperty("keyField") final Field keyField, + @JsonProperty("timestampField") final Field timestampField, + @JsonProperty("topicName") final String topicName, + @JsonProperty("alias") final String alias, + @JsonProperty("dataSourceType") + final StructuredDataSource.DataSourceType + dataSourceType, + @JsonProperty("structuredDataSource") + final StructuredDataSource structuredDataSource) { + super(id, timestampField, dataSourceType); + + this.schema = schema; + requireNonNull(topicName, "topicName is null"); + + this.topicName = topicName; + this.keyField = keyField; + this.alias = alias; + this.structuredDataSource = structuredDataSource; + } + + public String getTopicName() { + return topicName; + } + + @Override + public Schema getSchema() { + return schema; + } + + @Override + public Field getKeyField() { + return keyField; + } + + public String getAlias() { + return alias; + } + + public StructuredDataSource getStructuredDataSource() { + return structuredDataSource; + } + + @Override + public List getSources() { + return null; + } + + @Override + public R accept(PlanVisitor visitor, C context) { + return visitor.visitStructuredDataSourceNode(this, context); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/KsqlTopicSerDe.java 
b/ksql-core/src/main/java/io/confluent/ksql/serde/KsqlTopicSerDe.java
new file mode 100644
index 000000000000..40ac74bddd16
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/KsqlTopicSerDe.java
@@ -0,0 +1,20 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.serde;
+
+import io.confluent.ksql.metastore.StructuredDataSource;
+
+public abstract class KsqlTopicSerDe {
+
+  private final StructuredDataSource.DataSourceSerDe serDe;
+
+  protected KsqlTopicSerDe(StructuredDataSource.DataSourceSerDe serDe) {
+    this.serDe = serDe;
+  }
+
+  public StructuredDataSource.DataSourceSerDe getSerDe() {
+    return serDe;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlAvroTopicSerDe.java b/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlAvroTopicSerDe.java
new file mode 100644
index 000000000000..b8fcbcaf7c27
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlAvroTopicSerDe.java
@@ -0,0 +1,22 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.serde.avro;
+
+import io.confluent.ksql.metastore.StructuredDataSource;
+import io.confluent.ksql.serde.KsqlTopicSerDe;
+
+public class KsqlAvroTopicSerDe extends KsqlTopicSerDe {
+
+  private final String schemaString;
+
+  public KsqlAvroTopicSerDe(final String schemaString) {
+    super(StructuredDataSource.DataSourceSerDe.AVRO);
+    this.schemaString = schemaString;
+  }
+
+  public String getSchemaString() {
+    return schemaString;
+  }
+}
\ No newline at end of file
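KsqlTopicSerDe is a small tag-plus-payload hierarchy: the abstract base pins down the DataSourceSerDe enum constant for a topic's format, and each subclass carries whatever extra state that format needs, such as the Avro schema string here. A sketch of the intended use (the schema literal is invented for illustration):

    String avroSchema = "{\"type\":\"record\",\"name\":\"Order\","
        + "\"fields\":[{\"name\":\"ORDERID\",\"type\":\"long\"}]}";
    KsqlTopicSerDe serDe = new KsqlAvroTopicSerDe(avroSchema);
    // The enum tag drives format dispatch elsewhere in the engine.
    StructuredDataSource.DataSourceSerDe tag = serDe.getSerDe();  // AVRO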
diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlGenericRowAvroDeserializer.java b/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlGenericRowAvroDeserializer.java
new file mode 100644
index 000000000000..aa292d0c8e3d
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlGenericRowAvroDeserializer.java
@@ -0,0 +1,133 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.serde.avro;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.util.KsqlException;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.Decoder;
+import org.apache.avro.io.DecoderFactory;
+import org.apache.kafka.common.errors.SerializationException;
+import org.apache.kafka.common.serialization.Deserializer;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class KsqlGenericRowAvroDeserializer implements Deserializer<GenericRow> {
+
+  private final org.apache.kafka.connect.data.Schema schema;
+
+  String rowSchema;
+  Schema.Parser parser;
+  Schema avroSchema;
+  GenericDatumReader<GenericRecord> reader;
+
+  public KsqlGenericRowAvroDeserializer(org.apache.kafka.connect.data.Schema schema) {
+    this.schema = schema;
+  }
+
+  @Override
+  public void configure(final Map<String, ?> map, final boolean b) {
+    rowSchema = (String) map.get(KsqlGenericRowAvroSerializer.AVRO_SERDE_SCHEMA_CONFIG);
+    if (rowSchema == null) {
+      throw new SerializationException("Avro schema is not set for the deserializer.");
+    }
+    parser = new Schema.Parser();
+    avroSchema = parser.parse(rowSchema);
+    reader = new GenericDatumReader<>(avroSchema);
+  }
+
+  @Override
+  public GenericRow deserialize(final String topic, final byte[] bytes) {
+    if (bytes == null) {
+      return null;
+    }
+
+    GenericRow genericRow = null;
+    GenericRecord genericRecord = null;
+    try {
+      Decoder decoder = DecoderFactory.get().binaryDecoder(
+          (bytes[0] == 0) ? removeSchemaRegistryMetaBytes(bytes) : bytes, null);
+      genericRecord = reader.read(genericRecord, decoder);
+      List<Schema.Field> fields = genericRecord.getSchema().getFields();
+      List<Object> columns = new ArrayList<>();
+      for (Schema.Field field : fields) {
+        columns.add(enforceFieldType(field.schema(), genericRecord.get(field.name())));
+      }
+      genericRow = new GenericRow(columns);
+    } catch (Exception e) {
+      throw new SerializationException(e);
+    }
+    return genericRow;
+  }
+
+  private byte[] removeSchemaRegistryMetaBytes(final byte[] data) {
+    // Schema-registry-framed messages prefix the Avro payload with a magic byte
+    // plus a 4-byte schema id; skip those five bytes.
+    return Arrays.copyOfRange(data, 5, data.length);
+  }
+
+  private Object enforceFieldType(Schema fieldSchema, Object value) {
+    switch (fieldSchema.getType()) {
+      case BOOLEAN:
+      case INT:
+      case LONG:
+      case DOUBLE:
+      case STRING:
+      case MAP:
+        return value;
+      case ARRAY:
+        GenericData.Array genericArray = (GenericData.Array) value;
+        Class elementClass = getJavaTypeForAvroType(fieldSchema.getElementType());
+        Object[] arrayField =
+            (Object[]) java.lang.reflect.Array.newInstance(elementClass, genericArray.size());
+        for (int i = 0; i < genericArray.size(); i++) {
+          Object obj = enforceFieldType(fieldSchema.getElementType(), genericArray.get(i));
+          arrayField[i] = obj;
+        }
+        return arrayField;
+      default:
+        throw new KsqlException("Type is not supported: " + fieldSchema.getType());
+    }
+  }
+
+  private static Class getJavaTypeForAvroType(final Schema schema) {
+    switch (schema.getType()) {
+      case STRING:
+        return String.class;
+      case BOOLEAN:
+        return Boolean.class;
+      case INT:
+        return Integer.class;
+      case LONG:
+        return Long.class;
+      case DOUBLE:
+        return Double.class;
+      case ARRAY:
+        Class elementClass = getJavaTypeForAvroType(schema.getElementType());
+        return java.lang.reflect.Array.newInstance(elementClass, 0).getClass();
+      case MAP:
+        return (new HashMap<>()).getClass();
+      default:
+        throw new KsqlException("Type is not supported: " + schema.getType());
+    }
+  }
+
+
+  @Override
+  public void close() {
+
+  }
+}
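The deserializer reads its row schema from the same AVRO_SERDE_SCHEMA_CONFIG key that KsqlGenericRowAvroSerializer (next file) defines, so a round trip needs only that one config entry. A hedged sketch, assuming avroSchemaString and connectSchema both describe a single BIGINT column:

    Map<String, Object> config = new HashMap<>();
    config.put(KsqlGenericRowAvroSerializer.AVRO_SERDE_SCHEMA_CONFIG, avroSchemaString);

    KsqlGenericRowAvroSerializer serializer = new KsqlGenericRowAvroSerializer(connectSchema);
    serializer.configure(config, false);
    KsqlGenericRowAvroDeserializer deserializer = new KsqlGenericRowAvroDeserializer(connectSchema);
    deserializer.configure(config, false);

    byte[] bytes = serializer.serialize("orders", new GenericRow(Arrays.asList((Object) 42L)));
    GenericRow row = deserializer.deserialize("orders", bytes);  // should round-trip the column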
diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlGenericRowAvroSerializer.java b/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlGenericRowAvroSerializer.java
new file mode 100644
index 000000000000..3a7a35019fe3
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/avro/KsqlGenericRowAvroSerializer.java
@@ -0,0 +1,85 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.serde.avro;
+
+import io.confluent.ksql.physical.GenericRow;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.Encoder;
+import org.apache.avro.io.EncoderFactory;
+import org.apache.kafka.common.errors.SerializationException;
+import org.apache.kafka.common.serialization.Serializer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+public class KsqlGenericRowAvroSerializer implements Serializer<GenericRow> {
+
+  public static final String AVRO_SERDE_SCHEMA_CONFIG = "avro.serde.schema";
+  public static final String AVRO_SERDE_SCHEMA_DIRECTORY_DEFAULT = "/tmp/";
+
+  private final org.apache.kafka.connect.data.Schema schema;
+
+  String rowSchema;
+  Schema.Parser parser;
+  Schema avroSchema;
+  GenericDatumWriter<GenericRecord> writer;
+  ByteArrayOutputStream output;
+  Encoder encoder;
+  List<Schema.Field> fields;
+
+  public KsqlGenericRowAvroSerializer(org.apache.kafka.connect.data.Schema schema) {
+    this.schema = schema;
+  }
+
+  @Override
+  public void configure(final Map<String, ?> map, final boolean b) {
+    rowSchema = (String) map.get(AVRO_SERDE_SCHEMA_CONFIG);
+    if (rowSchema == null) {
+      throw new SerializationException("Avro schema is not set for the serializer.");
+    }
+    parser = new Schema.Parser();
+    avroSchema = parser.parse(rowSchema);
+    fields = avroSchema.getFields();
+    writer = new GenericDatumWriter<>(avroSchema);
+  }
+
+  @Override
+  public byte[] serialize(final String topic, final GenericRow genericRow) {
+    if (genericRow == null) {
+      return null;
+    }
+    GenericRecord avroRecord = new GenericData.Record(avroSchema);
+    for (int i = 0; i < genericRow.getColumns().size(); i++) {
+      if (fields.get(i).schema().getType() == Schema.Type.ARRAY) {
+        avroRecord.put(fields.get(i).name(),
+                       Arrays.asList((Object[]) genericRow.getColumns().get(i)));
+      } else {
+        avroRecord.put(fields.get(i).name(), genericRow.getColumns().get(i));
+      }
+    }
+
+    try {
+      output = new ByteArrayOutputStream();
+      encoder = EncoderFactory.get().binaryEncoder(output, null);
+      writer.write(avroRecord, encoder);
+      encoder.flush();
+      output.flush();
+    } catch (IOException e) {
+      throw new SerializationException("Error serializing AVRO message", e);
+    }
+    return output.toByteArray();
+  }
+
+  @Override
+  public void close() {
+
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedDeserializer.java b/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedDeserializer.java
new file mode 100644
index 000000000000..ca1bf9b95562
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedDeserializer.java
@@ -0,0 +1,96 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.serde.delimited; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; + +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.SchemaUtil; + +import org.apache.commons.csv.CSVFormat; +import org.apache.commons.csv.CSVParser; +import org.apache.commons.csv.CSVRecord; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.connect.data.Schema; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +public class KsqlDelimitedDeserializer implements Deserializer { + + private final Schema schema; + + public KsqlDelimitedDeserializer(Schema schema) { + this.schema = schema; + } + + @Override + public void configure(Map map, boolean b) { + + } + + @Override + public GenericRow deserialize(final String topic, final byte[] bytes) { + if (bytes == null) { + return null; + } + String recordCsvString = new String(bytes); + try { + List csvRecords = CSVParser.parse(recordCsvString, CSVFormat.DEFAULT) + .getRecords(); + if (csvRecords == null || csvRecords.isEmpty()) { + throw new KsqlException("Deserialization error in the delimited line: " + recordCsvString); + } + CSVRecord csvRecord = csvRecords.get(0); + if (csvRecord == null || csvRecord.size() == 0) { + throw new KsqlException("Deserialization error in the delimited line: " + recordCsvString); + } + List columns = new ArrayList(); + if (csvRecord.size() != schema.fields().size()) { + throw new KsqlException("Missing/Extra fields in the delimited line: " + recordCsvString); + } + for (int i = 0; i < csvRecord.size(); i++) { + columns.add(enforceFieldType(schema.fields().get(i).schema(), csvRecord.get(i))); + } + return new GenericRow(columns); + } catch (IOException e) { + throw new KsqlException("Could not parse the DELIMITED record: " + recordCsvString, e); + } catch (Exception e) { + throw new KsqlException("Exception in deserializing the delimited row: " + recordCsvString, + e); + } + } + + private Object enforceFieldType(Schema fieldSchema, String delimitedField) { + + switch (fieldSchema.type()) { + case BOOLEAN: + return Boolean.parseBoolean(delimitedField); + case INT32: + return Integer.parseInt(delimitedField); + case INT64: + return Long.parseLong(delimitedField); + case FLOAT64: + return Double.parseDouble(delimitedField); + case STRING: + return delimitedField; + case ARRAY: + case MAP: + default: + throw new KsqlException("Type is not supported: " + fieldSchema.type()); + } + } + + @Override + public void close() { + + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedSerializer.java b/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedSerializer.java new file mode 100644 index 000000000000..8bc48a624be0 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedSerializer.java @@ -0,0 +1,49 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.serde.delimited;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.util.KsqlException;
+
+import org.apache.kafka.common.serialization.Serializer;
+
+import java.util.Map;
+
+
+public class KsqlDelimitedSerializer implements Serializer<GenericRow> {
+
+
+  @Override
+  public void configure(Map<String, ?> map, boolean b) {
+
+  }
+
+  @Override
+  public byte[] serialize(final String topic, final GenericRow genericRow) {
+    if (genericRow == null) {
+      return null;
+    }
+
+    try {
+      StringBuilder recordString = new StringBuilder();
+      for (int i = 0; i < genericRow.getColumns().size(); i++) {
+        if (i != 0) {
+          recordString.append(",");
+        }
+        // Values are written verbatim, with no quoting or escaping of embedded
+        // delimiters; String.valueOf also guards against null columns.
+        recordString.append(String.valueOf(genericRow.getColumns().get(i)));
+      }
+      return recordString.toString().getBytes();
+    } catch (Exception e) {
+      throw new KsqlException(e.getMessage(), e);
+    }
+  }
+
+  @Override
+  public void close() {
+
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedTopicSerDe.java b/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedTopicSerDe.java
new file mode 100644
index 000000000000..c5df82039421
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/delimited/KsqlDelimitedTopicSerDe.java
@@ -0,0 +1,16 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.serde.delimited;
+
+import io.confluent.ksql.metastore.StructuredDataSource;
+import io.confluent.ksql.serde.KsqlTopicSerDe;
+
+
+public class KsqlDelimitedTopicSerDe extends KsqlTopicSerDe {
+
+  public KsqlDelimitedTopicSerDe() {
+    super(StructuredDataSource.DataSourceSerDe.DELIMITED);
+  }
+}
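Because serialization is just toString() values joined by commas, the delimited format only round-trips when no column value itself contains a comma. A sketch of both directions (orderSchema is an assumed three-field BIGINT/VARCHAR/DOUBLE schema matching the row):

    KsqlDelimitedSerializer serializer = new KsqlDelimitedSerializer();
    byte[] line = serializer.serialize("orders",
        new GenericRow(Arrays.asList((Object) 1L, "item_1", 10.0)));
    // line now holds the bytes of "1,item_1,10.0"

    KsqlDelimitedDeserializer deserializer = new KsqlDelimitedDeserializer(orderSchema);
    GenericRow row = deserializer.deserialize("orders", line);  // values re-typed per schema field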
diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java b/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java
new file mode 100644
index 000000000000..fb1b2aef5c42
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java
@@ -0,0 +1,140 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.serde.json;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.util.KsqlException;
+import io.confluent.ksql.util.SchemaUtil;
+import org.apache.kafka.common.errors.SerializationException;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class KsqlJsonDeserializer implements Deserializer<GenericRow> {
+
+
+  // TODO: Possibly use the Jackson streaming API instead of ObjectMapper for better performance
+  private ObjectMapper objectMapper = new ObjectMapper();
+
+  private final Schema schema;
+  private final Map<String, String> caseSensitiveKeyMap = new HashMap<>();
+
+  public KsqlJsonDeserializer(Schema schema) {
+    this.schema = schema;
+  }
+
+  @Override
+  public void configure(Map<String, ?> map, boolean b) {
+
+  }
+
+  @Override
+  public GenericRow deserialize(final String topic, final byte[] bytes) {
+    if (bytes == null) {
+      return null;
+    }
+
+    GenericRow data;
+    try {
+      data = getGenericRow(bytes);
+    } catch (Exception e) {
+      throw new SerializationException(e);
+    }
+    return data;
+  }
+
+  private GenericRow getGenericRow(byte[] rowJsonBytes) throws IOException {
+    JsonNode jsonNode = objectMapper.readTree(rowJsonBytes);
+    CaseInsensitiveJsonNode caseInsensitiveJsonNode = new CaseInsensitiveJsonNode(jsonNode);
+    Map<String, String> keyMap = caseInsensitiveJsonNode.keyMap;
+    List<Object> columns = new ArrayList<>();
+    for (Field field : schema.fields()) {
+      // Strip any source alias prefix ("ALIAS.FIELD") before looking the key up.
+      String jsonFieldName = field.name().substring(field.name().indexOf(".") + 1);
+      JsonNode fieldJsonNode = jsonNode.get(keyMap.get(jsonFieldName));
+      if (fieldJsonNode == null) {
+        columns.add(null);
+      } else {
+        columns.add(enforceFieldType(field.schema(), fieldJsonNode));
+      }
+    }
+    return new GenericRow(columns);
+  }
+
+  private Object enforceFieldType(Schema fieldSchema, JsonNode fieldJsonNode) {
+    switch (fieldSchema.type()) {
+      case BOOLEAN:
+        return fieldJsonNode.asBoolean();
+      case INT32:
+        return fieldJsonNode.asInt();
+      case INT64:
+        return fieldJsonNode.asLong();
+      case FLOAT64:
+        return fieldJsonNode.asDouble();
+      case STRING:
+        if (fieldJsonNode.isTextual()) {
+          return fieldJsonNode.asText();
+        } else {
+          return fieldJsonNode.toString();
+        }
+      case ARRAY:
+        ArrayNode arrayNode = (ArrayNode) fieldJsonNode;
+        Class elementClass = SchemaUtil.getJavaType(fieldSchema.valueSchema());
+        Object[] arrayField =
+            (Object[]) java.lang.reflect.Array.newInstance(elementClass, arrayNode.size());
+        for (int i = 0; i < arrayNode.size(); i++) {
+          arrayField[i] = enforceFieldType(fieldSchema.valueSchema(), arrayNode.get(i));
+        }
+        return arrayField;
+      case MAP:
+        Map<String, Object> mapField = new HashMap<>();
+        Iterator<Map.Entry<String, JsonNode>> iterator = fieldJsonNode.fields();
+        while (iterator.hasNext()) {
+          Map.Entry<String, JsonNode> entry = iterator.next();
+          mapField.put(entry.getKey(), enforceFieldType(fieldSchema.valueSchema(),
+                                                        entry.getValue()));
+        }
+        return mapField;
+      default:
+        throw new KsqlException("Type is not supported: " + fieldSchema.type());
+    }
+  }
+
+  class CaseInsensitiveJsonNode {
+    JsonNode jsonNode;
+    Map<String, String> keyMap = new HashMap<>();
+
+    CaseInsensitiveJsonNode(JsonNode jsonNode) {
+      this.jsonNode = jsonNode;
+      // Remember each field's original spelling keyed by its upper-cased name.
+      Iterator<String> fieldNames = jsonNode.fieldNames();
+      while (fieldNames.hasNext()) {
+        String fieldName = fieldNames.next();
+        keyMap.put(fieldName.toUpperCase(), fieldName);
+      }
+    }
+  }
+
+  @Override
+  public void close() {
+
+  }
+}
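Key lookup is case-insensitive: CaseInsensitiveJsonNode maps each upper-cased key back to its original spelling, and the schema field name is first stripped of any alias prefix up to the first dot, so a field declared as ORDERS.ORDERID matches a JSON key spelled orderId. A sketch (schema and payload invented for illustration):

    Schema schema = SchemaBuilder.struct()
        .field("ORDERS.ORDERID", Schema.INT64_SCHEMA)
        .field("ORDERS.ITEMID", Schema.STRING_SCHEMA)
        .build();
    KsqlJsonDeserializer deserializer = new KsqlJsonDeserializer(schema);
    byte[] json = "{\"orderId\": 42, \"itemId\": \"item_7\"}".getBytes();
    GenericRow row = deserializer.deserialize("orders", json);
    // row.getColumns() -> [42L, "item_7"]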
diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonSerializer.java b/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonSerializer.java
new file mode 100644
index 000000000000..b8fcba0da488
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonSerializer.java
@@ -0,0 +1,65 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.serde.json;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.confluent.ksql.physical.GenericRow;
+import org.apache.kafka.common.errors.SerializationException;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class KsqlJsonSerializer implements Serializer<GenericRow> {
+
+  private final ObjectMapper objectMapper = new ObjectMapper();
+  private final Schema schema;
+
+  public KsqlJsonSerializer(Schema schema) {
+    this.schema = schema;
+  }
+
+  @Override
+  public void configure(final Map<String, ?> props, final boolean isKey) {
+  }
+
+  @Override
+  public byte[] serialize(final String topic, final GenericRow data) {
+    if (data == null) {
+      return null;
+    }
+
+    try {
+      return objectMapper.writeValueAsBytes(dataToMap(data));
+    } catch (Exception e) {
+      throw new SerializationException("Error serializing JSON message", e);
+    }
+  }
+
+  public Map<String, Object> dataToMap(final GenericRow data) {
+    if (data == null) {
+      return null;
+    }
+    Map<String, Object> result = new HashMap<>();
+
+    for (int i = 0; i < data.getColumns().size(); i++) {
+      String schemaColumnName = schema.fields().get(i).name();
+      // Strip any source alias prefix so the JSON keys match the plain field names.
+      String mapColumnName = schemaColumnName.substring(schemaColumnName.indexOf('.') + 1);
+      result.put(mapColumnName, data.getColumns().get(i));
+    }
+
+    return result;
+  }
+
+  @Override
+  public void close() {
+  }
+
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonTopicSerDe.java b/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonTopicSerDe.java
new file mode 100644
index 000000000000..b1d53bc9e2d0
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/serde/json/KsqlJsonTopicSerDe.java
@@ -0,0 +1,22 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.serde.json;
+
+import io.confluent.ksql.metastore.StructuredDataSource;
+import io.confluent.ksql.serde.KsqlTopicSerDe;
+import org.apache.kafka.connect.data.Schema;
+
+public class KsqlJsonTopicSerDe extends KsqlTopicSerDe {
+
+  Schema rowSchema;
+
+  public KsqlJsonTopicSerDe(Schema rowSchema) {
+    super(StructuredDataSource.DataSourceSerDe.JSON);
+    this.rowSchema = rowSchema;
+  }
+
+  public Schema getRowSchema() {
+    return rowSchema;
+  }
+}
\ No newline at end of file
diff --git a/ksql-core/src/main/java/io/confluent/ksql/structured/QueuedSchemaKStream.java b/ksql-core/src/main/java/io/confluent/ksql/structured/QueuedSchemaKStream.java
new file mode 100644
index 000000000000..eec33e95c8a4
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/structured/QueuedSchemaKStream.java
@@ -0,0 +1,114 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.structured; + +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.serde.KsqlTopicSerDe; +import io.confluent.ksql.util.KafkaTopicClient; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.Pair; + +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.kstream.KStream; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.SynchronousQueue; + +public class QueuedSchemaKStream extends SchemaKStream { + + private final SynchronousQueue> rowQueue; + + public QueuedSchemaKStream(final Schema schema, final KStream kstream, final Field keyField, + final List sourceSchemaKStreams, + SynchronousQueue> rowQueue, + Type type) { + super(schema, kstream, keyField, sourceSchemaKStreams, type); + this.rowQueue = rowQueue; + } + + public QueuedSchemaKStream(SchemaKStream schemaKStream, + SynchronousQueue> rowQueue, + Type type) { + this( + schemaKStream.schema, + schemaKStream.kstream, + schemaKStream.keyField, + schemaKStream.sourceSchemaKStreams, + rowQueue, + type + ); + } + + public SynchronousQueue> getQueue() { + return rowQueue; + } + + @Override + public SchemaKStream into(String kafkaTopicName, Serde topicValueSerDe, + Set rowkeyIndexes, KsqlConfig ksqlConfig, KafkaTopicClient kafkaTopicClient) { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaKStream filter(Expression filterExpression) throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaKStream select(Schema selectSchema) { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaKStream select(List> expressions) throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaKStream leftJoin(SchemaKTable schemaKTable, Schema joinSchema, + Field joinKey, KsqlTopicSerDe joinSerDe) { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaKStream selectKey(Field newKeyField) { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaKGroupedStream groupByKey() { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaKGroupedStream groupByKey(Serde keySerde, Serde valSerde) { + throw new UnsupportedOperationException(); + } + + @Override + public Field getKeyField() { + return super.getKeyField(); + } + + @Override + public Schema getSchema() { + return super.getSchema(); + } + + @Override + public KStream getKstream() { + return super.getKstream(); + } + + @Override + public List getSourceSchemaKStreams() { + return super.getSourceSchemaKStreams(); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKGroupedStream.java b/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKGroupedStream.java new file mode 100644 index 000000000000..fd2b97293cf8 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKGroupedStream.java @@ -0,0 +1,125 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.structured; + +import io.confluent.ksql.function.udaf.KudafAggregator; +import io.confluent.ksql.parser.tree.HoppingWindowExpression; +import io.confluent.ksql.parser.tree.SessionWindowExpression; +import io.confluent.ksql.parser.tree.TumblingWindowExpression; +import io.confluent.ksql.parser.tree.WindowExpression; +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.util.GenericRowValueTypeEnforcer; +import io.confluent.ksql.util.KsqlException; +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.Aggregator; +import org.apache.kafka.streams.kstream.Initializer; +import org.apache.kafka.streams.kstream.KGroupedStream; +import org.apache.kafka.streams.kstream.KTable; +import org.apache.kafka.streams.kstream.SessionWindows; +import org.apache.kafka.streams.kstream.TimeWindows; +import org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.kstream.internals.SessionWindow; + +import java.util.List; + +public class SchemaKGroupedStream { + + private final Schema schema; + private final KGroupedStream kgroupedStream; + private final Field keyField; + private final GenericRowValueTypeEnforcer genericRowValueTypeEnforcer; + private final List sourceSchemaKStreams; + + public SchemaKGroupedStream(final Schema schema, final KGroupedStream kgroupedStream, + final Field keyField, + final List sourceSchemaKStreams) { + this.schema = schema; + this.kgroupedStream = kgroupedStream; + this.keyField = keyField; + this.genericRowValueTypeEnforcer = new GenericRowValueTypeEnforcer(schema); + this.sourceSchemaKStreams = sourceSchemaKStreams; + } + + public SchemaKTable aggregate(final Initializer initializer, + final KudafAggregator aggregator, + final WindowExpression windowExpression, + final Serde topicValueSerDe, + final String storeName) { + boolean isWindowed = false; + KTable, GenericRow> aggKtable; + if (windowExpression != null) { + isWindowed = true; + if (windowExpression.getKsqlWindowExpression() instanceof TumblingWindowExpression) { + TumblingWindowExpression tumblingWindowExpression = + (TumblingWindowExpression) windowExpression.getKsqlWindowExpression(); + aggKtable = + kgroupedStream + .aggregate(initializer, aggregator, + TimeWindows.of( + getWindowUnitInMillisecond( + tumblingWindowExpression.getSize(), + tumblingWindowExpression + .getSizeUnit())), + topicValueSerDe, + storeName); + } else if (windowExpression.getKsqlWindowExpression() instanceof HoppingWindowExpression) { + HoppingWindowExpression hoppingWindowExpression = + (HoppingWindowExpression) windowExpression.getKsqlWindowExpression(); + aggKtable = + kgroupedStream + .aggregate(initializer, aggregator, + TimeWindows.of( + getWindowUnitInMillisecond(hoppingWindowExpression.getSize(), + hoppingWindowExpression.getSizeUnit())) + .advanceBy(getWindowUnitInMillisecond( + hoppingWindowExpression.getAdvanceBy(), + hoppingWindowExpression.getAdvanceByUnit())), + topicValueSerDe, storeName); + } else if (windowExpression.getKsqlWindowExpression() instanceof SessionWindowExpression) { + SessionWindowExpression sessionWindowExpression = + (SessionWindowExpression) windowExpression.getKsqlWindowExpression(); + aggKtable = + kgroupedStream + .aggregate(initializer, aggregator, + aggregator.getMerger(), + SessionWindows.with( + getWindowUnitInMillisecond( + sessionWindowExpression.getGap(), + sessionWindowExpression + .getSizeUnit())), + 
topicValueSerDe, + storeName); + } else { + throw new KsqlException("Could not set the window expression for aggregate."); + } + } else { + aggKtable = + kgroupedStream.aggregate(initializer, aggregator, topicValueSerDe, storeName); + } + return new SchemaKTable(schema, aggKtable, keyField, sourceSchemaKStreams, isWindowed, + SchemaKStream.Type.AGGREGATE); + } + + private long getWindowUnitInMillisecond(long value, WindowExpression.WindowUnit windowUnit) { + + switch (windowUnit) { + case DAY: + return value * 24 * 60 * 60 * 1000; + case HOUR: + return value * 60 * 60 * 1000; + case MINUTE: + return value * 60 * 1000; + case SECOND: + return value * 1000; + case MILLISECOND: + return value; + default: + return -1; + } + } + +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKStream.java b/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKStream.java new file mode 100644 index 000000000000..8383f78d374a --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKStream.java @@ -0,0 +1,326 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.structured; + +import io.confluent.ksql.function.udf.Kudf; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.serde.KsqlTopicSerDe; +import io.confluent.ksql.util.ExpressionMetadata; +import io.confluent.ksql.util.ExpressionUtil; +import io.confluent.ksql.util.GenericRowValueTypeEnforcer; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.Pair; +import io.confluent.ksql.util.SchemaUtil; +import io.confluent.ksql.util.SerDeUtil; +import io.confluent.ksql.util.KafkaTopicClient; + +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.kstream.ForeachAction; +import org.apache.kafka.streams.kstream.KGroupedStream; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KeyValueMapper; +import org.apache.kafka.streams.kstream.ValueJoiner; +import org.apache.kafka.streams.kstream.ValueMapper; +import org.apache.kafka.streams.kstream.Windowed; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.SynchronousQueue; + +public class SchemaKStream { + + public enum Type { SOURCE, PROJECT, FILTER, AGGREGATE, SINK, REKEY, JOIN, TOSTREAM } + + protected final Schema schema; + protected final KStream kstream; + protected final Field keyField; + protected final List sourceSchemaKStreams; + protected final GenericRowValueTypeEnforcer genericRowValueTypeEnforcer; + protected final Type type; + + private static final Logger log = LoggerFactory.getLogger(SchemaKStream.class); + + public SchemaKStream(final Schema schema, final KStream kstream, final Field keyField, + final List sourceSchemaKStreams, Type type) { + this.schema = schema; + this.kstream = kstream; + this.keyField = keyField; + this.sourceSchemaKStreams = sourceSchemaKStreams; + this.genericRowValueTypeEnforcer = new GenericRowValueTypeEnforcer(schema); 
+ this.type = type; + } + + public QueuedSchemaKStream toQueue(Optional limit) { + SynchronousQueue> rowQueue = new SynchronousQueue<>(); + kstream.foreach(new QueuePopulator(rowQueue, limit)); + return new QueuedSchemaKStream(this, rowQueue, Type.SINK); + } + + public SchemaKStream into(final String kafkaTopicName, final Serde topicValueSerDe, + final Set rowkeyIndexes, KsqlConfig ksqlConfig, KafkaTopicClient kafkaTopicClient) { + + createSinkTopic(kafkaTopicName, ksqlConfig, kafkaTopicClient); + + kstream + .map(new KeyValueMapper>() { + @Override + public KeyValue apply(String key, GenericRow row) { + if (row == null) { + return new KeyValue<>(key, null); + } + List columns = new ArrayList(); + for (int i = 0; i < row.getColumns().size(); i++) { + if (!rowkeyIndexes.contains(i)) { + columns.add(row.getColumns().get(i)); + } + } + return new KeyValue<>(key, new GenericRow(columns)); + } + }).to(Serdes.String(), topicValueSerDe, kafkaTopicName); + return this; + } + + public SchemaKStream filter(final Expression filterExpression) throws Exception { + SqlPredicate predicate = new SqlPredicate(filterExpression, schema, false); + KStream filteredKStream = kstream.filter(predicate.getPredicate()); + return new SchemaKStream(schema, filteredKStream, keyField, Arrays.asList(this), + Type.FILTER); + } + + public SchemaKStream select(final Schema selectSchema) { + + KStream + projectedKStream = + kstream.map(new KeyValueMapper>() { + @Override + public KeyValue apply(String key, GenericRow row) { + List newColumns = new ArrayList(); + for (Field schemaField : selectSchema.fields()) { + newColumns.add( + row.getColumns().get(SchemaUtil.getFieldIndexByName(schema, schemaField.name()))); + } + GenericRow newRow = new GenericRow(newColumns); + return new KeyValue(key, newRow); + } + }); + + return new SchemaKStream(selectSchema, projectedKStream, keyField, Arrays.asList(this), + Type.PROJECT); + } + + public SchemaKStream select(final List> expressionPairList) + throws Exception { + ExpressionUtil expressionUtil = new ExpressionUtil(); + // TODO: Optimize to remove the code gen for constants and single columns references + // TODO: and use them directly. + // TODO: Only use code get when we have real expression. 
+ List expressionEvaluators = new ArrayList<>(); + SchemaBuilder schemaBuilder = SchemaBuilder.struct(); + for (Pair expressionPair : expressionPairList) { + ExpressionMetadata + expressionEvaluator = + expressionUtil.getExpressionEvaluator(expressionPair.getRight(), schema); + schemaBuilder.field(expressionPair.getLeft(), expressionEvaluator.getExpressionType()); + expressionEvaluators.add(expressionEvaluator); + } + KStream + projectedKStream = + kstream.mapValues(new ValueMapper() { + @Override + public GenericRow apply(GenericRow row) { + try { + List newColumns = new ArrayList(); + for (int i = 0; i < expressionPairList.size(); i++) { + try { + int[] parameterIndexes = expressionEvaluators.get(i).getIndexes(); + Kudf[] kudfs = expressionEvaluators.get(i).getUdfs(); + Object[] parameterObjects = new Object[parameterIndexes.length]; + for (int j = 0; j < parameterIndexes.length; j++) { + if (parameterIndexes[j] < 0) { + parameterObjects[j] = kudfs[j]; + } else { + parameterObjects[j] = genericRowValueTypeEnforcer + .enforceFieldType(parameterIndexes[j], + row.getColumns().get(parameterIndexes[j])); + } + } + Object columnValue = null; + columnValue = expressionEvaluators + .get(i).getExpressionEvaluator().evaluate(parameterObjects); + newColumns.add(columnValue); + } catch (Exception ex) { + log.error("Error calculating column with index " + i + " : " + + expressionPairList.get(i).getLeft()); + newColumns.add(null); + } + } + GenericRow newRow = new GenericRow(newColumns); + return newRow; + } catch (Exception e) { + log.error("Projection exception for row: " + row.toString()); + log.error(e.getMessage(), e); + throw new KsqlException("Error in SELECT clause: " + e.getMessage(), e); + } + } + }); + + return new SchemaKStream(schemaBuilder.build(), + projectedKStream, keyField, Arrays.asList(this), + Type.PROJECT); + } + + public SchemaKStream leftJoin(final SchemaKTable schemaKTable, final Schema joinSchema, + final Field joinKey, + KsqlTopicSerDe joinSerDe) { + + KStream joinedKStream = + kstream.leftJoin( + schemaKTable.getKtable(), new ValueJoiner() { + @Override + public GenericRow apply(GenericRow leftGenericRow, GenericRow rightGenericRow) { + List columns = new ArrayList<>(); + columns.addAll(leftGenericRow.getColumns()); + if (rightGenericRow == null) { + for (int i = leftGenericRow.getColumns().size(); + i < joinSchema.fields().size(); i++) { + columns.add(null); + } + } else { + columns.addAll(rightGenericRow.getColumns()); + } + + GenericRow joinGenericRow = new GenericRow(columns); + return joinGenericRow; + } + }, Serdes.String(), SerDeUtil.getRowSerDe(joinSerDe, this.getSchema())); + + return new SchemaKStream(joinSchema, joinedKStream, joinKey, + Arrays.asList(this, schemaKTable), Type.JOIN); + } + + public SchemaKStream selectKey(final Field newKeyField) { + if (keyField != null && + keyField.name().equals(newKeyField.name())) { + return this; + } + + KStream keyedKStream = kstream.selectKey(new KeyValueMapper() { + @Override + public String apply(String key, GenericRow value) { + + String + newKey = + value.getColumns().get(SchemaUtil.getFieldIndexByName(schema, newKeyField.name())) + .toString(); + return newKey; + } + }).map(new KeyValueMapper>() { + @Override + public KeyValue apply(String key, GenericRow row) { + row.getColumns().set(SchemaUtil.ROWKEY_NAME_INDEX, key); + return new KeyValue<>(key, row); + } + }); + + return new SchemaKStream(schema, keyedKStream, newKeyField, Arrays.asList(this), + Type.REKEY); + } + + public SchemaKGroupedStream groupByKey() { + 
KGroupedStream kgroupedStream = kstream.groupByKey(); + return new SchemaKGroupedStream(schema, kgroupedStream, keyField, Arrays.asList(this)); + } + + public SchemaKGroupedStream groupByKey(final Serde keySerde, + final Serde valSerde) { + KGroupedStream kgroupedStream = kstream.groupByKey(keySerde, valSerde); + return new SchemaKGroupedStream(schema, kgroupedStream, keyField, Arrays.asList(this)); + } + + public Field getKeyField() { + return keyField; + } + + public Schema getSchema() { + return schema; + } + + public KStream getKstream() { + return kstream; + } + + public List getSourceSchemaKStreams() { + return sourceSchemaKStreams; + } + + protected static class QueuePopulator implements ForeachAction { + private final SynchronousQueue> queue; + private final Optional limit; + private int counter = 0; + + public QueuePopulator(SynchronousQueue> queue, + Optional limit) { + this.queue = queue; + this.limit = limit; + } + + @Override + public void apply(K key, GenericRow row) { + try { + if (row == null) { + return; + } + if (limit.isPresent()) { + counter ++; + if (counter > limit.get()) { + throw new KsqlException("LIMIT reached for the partition."); + } + } + String keyString; + if (key instanceof Windowed) { + Windowed windowedKey = (Windowed) key; + keyString = String.format("%s : %s", windowedKey.key(), windowedKey.window()); + } else { + keyString = Objects.toString(key); + } + queue.put(new KeyValue<>(keyString, row)); + } catch (InterruptedException exception) { + log.error(" Exception while enqueuing the row: " + key + " : " + row); + log.error(" Exception: " + exception.getMessage()); + } + } + } + + public String getExecutionPlan(String indent) { + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append(indent + " > [ " + type + " ] Schema: " + SchemaUtil + .getSchemaDefinitionString(schema) + ".\n"); + for (SchemaKStream schemaKStream: sourceSchemaKStreams) { + stringBuilder.append("\t" + indent + schemaKStream.getExecutionPlan(indent + "\t")); + } + return stringBuilder.toString(); + } + + protected void createSinkTopic(final String kafkaTopicName, KsqlConfig ksqlConfig, KafkaTopicClient kafkaTopicClient) { + int numberOfPartitions = (Integer) ksqlConfig.get(KsqlConfig.SINK_NUMBER_OF_PARTITIONS); + short numberOfReplications = (Short) ksqlConfig.get(KsqlConfig.SINK_NUMBER_OF_REPLICATIONS); + kafkaTopicClient.createTopic(kafkaTopicName, numberOfPartitions, numberOfReplications); + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKTable.java b/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKTable.java new file mode 100644 index 000000000000..f940c55f2501 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/structured/SchemaKTable.java @@ -0,0 +1,194 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.structured; + +import io.confluent.ksql.function.udf.Kudf; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.util.ExpressionMetadata; +import io.confluent.ksql.util.ExpressionUtil; +import io.confluent.ksql.util.KafkaTopicClient; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.Pair; +import io.confluent.ksql.util.WindowedSerde; +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KTable; +import org.apache.kafka.streams.kstream.KeyValueMapper; +import org.apache.kafka.streams.kstream.ValueMapper; +import org.apache.kafka.streams.kstream.Windowed; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.SynchronousQueue; + +public class SchemaKTable extends SchemaKStream { + + private static final Logger log = LoggerFactory.getLogger(SchemaKTable.class); + + private final KTable ktable; + private final boolean isWindowed; + + public SchemaKTable(final Schema schema, final KTable ktable, final Field keyField, + final List sourceSchemaKStreams, boolean isWindowed, + Type type) { + super(schema, null, keyField, sourceSchemaKStreams, type); + this.ktable = ktable; + this.isWindowed = isWindowed; + } + + @Override + public SchemaKTable into(final String kafkaTopicName, final Serde topicValueSerDe, + Set rowkeyIndexes, KsqlConfig ksqlConfig, KafkaTopicClient kafkaTopicClient) { + + createSinkTopic(kafkaTopicName, ksqlConfig, kafkaTopicClient); + + if (isWindowed) { + ktable.toStream() + .map(new KeyValueMapper, GenericRow, + KeyValue, GenericRow>>() { + @Override + public KeyValue, + GenericRow> apply(Windowed key, GenericRow row) { + if (row == null) { + return new KeyValue<>(key, null); + } + List columns = new ArrayList(); + for (int i = 0; i < row.getColumns().size(); i++) { + if (!rowkeyIndexes.contains(i)) { + columns.add(row.getColumns().get(i)); + } + } + return new KeyValue<>(key, new GenericRow(columns)); + } + }).to(new WindowedSerde(), topicValueSerDe, kafkaTopicName); + } else { + ktable.toStream() + .map(new KeyValueMapper>() { + @Override + public KeyValue apply(String key, GenericRow row) { + if (row == null) { + return new KeyValue<>(key, null); + } + List columns = new ArrayList(); + for (int i = 0; i < row.getColumns().size(); i++) { + if (!rowkeyIndexes.contains(i)) { + columns.add(row.getColumns().get(i)); + } + } + return new KeyValue<>(key, new GenericRow(columns)); + } + }).to(Serdes.String(), topicValueSerDe, kafkaTopicName); + } + + return this; + } + + @Override + public QueuedSchemaKStream toQueue(Optional limit) { + SynchronousQueue> rowQueue = new SynchronousQueue<>(); + ktable.toStream().foreach(new QueuePopulator(rowQueue, limit)); + return new QueuedSchemaKStream(this, rowQueue, Type.SINK); + } + + @Override + public SchemaKTable filter(final Expression filterExpression) throws Exception { + SqlPredicate predicate = new SqlPredicate(filterExpression, 
schema, isWindowed); + KTable filteredKTable = ktable.filter(predicate.getPredicate()); + return new SchemaKTable(schema, filteredKTable, keyField, Arrays.asList(this), isWindowed, + Type.FILTER); + } + + @Override + public SchemaKTable select(final List> expressionPairList) + throws Exception { + ExpressionUtil expressionUtil = new ExpressionUtil(); + // TODO: Optimize to remove the code gen for constants and single + // TODO: columns references and use them directly. + // TODO: Only use code get when we have real expression. + List expressionEvaluators = new ArrayList<>(); + SchemaBuilder schemaBuilder = SchemaBuilder.struct(); + for (Pair expressionPair : expressionPairList) { + ExpressionMetadata + expressionEvaluator = + expressionUtil.getExpressionEvaluator(expressionPair.getRight(), schema); + schemaBuilder.field(expressionPair.getLeft(), expressionEvaluator.getExpressionType()); + expressionEvaluators.add(expressionEvaluator); + } + + KTable projectedKTable = ktable.mapValues(new ValueMapper() { + @Override + public GenericRow apply(GenericRow row) { + try { + List newColumns = new ArrayList(); + for (int i = 0; i < expressionPairList.size(); i++) { + try { + int[] parameterIndexes = expressionEvaluators.get(i).getIndexes(); + Kudf[] kudfs = expressionEvaluators.get(i).getUdfs(); + Object[] parameterObjects = new Object[parameterIndexes.length]; + for (int j = 0; j < parameterIndexes.length; j++) { + if (parameterIndexes[j] < 0) { + parameterObjects[j] = kudfs[j]; + } else { + parameterObjects[j] = + genericRowValueTypeEnforcer.enforceFieldType(parameterIndexes[j], + row.getColumns() + .get(parameterIndexes[j])); + } + } + Object columnValue = null; + columnValue = expressionEvaluators.get(i).getExpressionEvaluator() + .evaluate(parameterObjects); + newColumns.add(columnValue); + } catch (Exception e) { + log.error("Error calculating column with index " + i + " : " + + expressionPairList.get(i).getLeft()); + newColumns.add(null); + } + } + GenericRow newRow = new GenericRow(newColumns); + return newRow; + } catch (Exception e) { + log.error("Projection exception for row: " + row.toString()); + log.error(e.getMessage(), e); + throw new KsqlException("Error in SELECT clause: " + e.getMessage(), e); + } + } + }); + + return new SchemaKTable(schemaBuilder.build(), projectedKTable, keyField, + Arrays.asList(this), isWindowed, Type.PROJECT); + } + + @Override + public KStream getKstream() { + return ktable.toStream(); + } + + public SchemaKStream toStream() { + return new SchemaKStream(schema, ktable.toStream(), keyField, sourceSchemaKStreams, + Type.TOSTREAM); + } + + public KTable getKtable() { + return ktable; + } + + public boolean isWindowed() { + return isWindowed; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/structured/SqlPredicate.java b/ksql-core/src/main/java/io/confluent/ksql/structured/SqlPredicate.java new file mode 100644 index 000000000000..6aea660bb4a6 --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/structured/SqlPredicate.java @@ -0,0 +1,159 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.structured;
+
+import io.confluent.ksql.function.udf.Kudf;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.util.ExpressionMetadata;
+import io.confluent.ksql.util.ExpressionUtil;
+import io.confluent.ksql.util.GenericRowValueTypeEnforcer;
+import io.confluent.ksql.util.SchemaUtil;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.streams.kstream.Predicate;
+import org.apache.kafka.streams.kstream.Windowed;
+import org.codehaus.commons.compiler.CompilerFactoryFactory;
+import org.codehaus.commons.compiler.IExpressionEvaluator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.Map;
+
+public class SqlPredicate {
+
+  private Expression filterExpression;
+  private final Schema schema;
+  private IExpressionEvaluator ee;
+  private int[] columnIndexes;
+  private boolean isWindowedKey;
+
+  private GenericRowValueTypeEnforcer genericRowValueTypeEnforcer;
+  private static final Logger log = LoggerFactory.getLogger(SqlPredicate.class);
+
+  public SqlPredicate(final Expression filterExpression, final Schema schema,
+                      boolean isWindowedKey) throws Exception {
+    this.filterExpression = filterExpression;
+    this.schema = schema;
+    this.genericRowValueTypeEnforcer = new GenericRowValueTypeEnforcer(schema);
+    this.isWindowedKey = isWindowedKey;
+
+    ExpressionUtil expressionUtil = new ExpressionUtil();
+    Map<String, Class> parameterMap = expressionUtil.getParameterInfo(filterExpression, schema);
+
+    String[] parameterNames = new String[parameterMap.size()];
+    Class[] parameterTypes = new Class[parameterMap.size()];
+    columnIndexes = new int[parameterMap.size()];
+
+    int index = 0;
+    for (String parameterName : parameterMap.keySet()) {
+      parameterNames[index] = parameterName;
+      parameterTypes[index] = parameterMap.get(parameterName);
+      columnIndexes[index] = SchemaUtil.getFieldIndexByName(schema, parameterName);
+      index++;
+    }
+
+    ee = CompilerFactoryFactory.getDefaultCompilerFactory().newExpressionEvaluator();
+
+    // The generated expression takes one parameter per column (or UDF) that the
+    // filter expression references.
+    ee.setParameters(parameterNames, parameterTypes);
+
+    // A filter predicate always evaluates to a boolean.
+    ee.setExpressionType(boolean.class);
+
+    String expressionStr = filterExpression.getCodegenString(schema);
+
+    // "Cook" (scan, parse, compile and load) the expression with Janino.
+ ee.cook(expressionStr); + } + + public Predicate getPredicate() throws Exception { + if (isWindowedKey) { + return getWindowedKeyPredicate(); + } else { + return getStringKeyPredicate(); + } + } + + private Predicate getStringKeyPredicate() throws Exception { + ExpressionUtil expressionUtil = new ExpressionUtil(); + ExpressionMetadata + expressionEvaluator = + expressionUtil.getExpressionEvaluator(filterExpression, schema); + return new Predicate() { + @Override + public boolean test(String key, GenericRow row) { + try { + Kudf[] kudfs = expressionEvaluator.getUdfs(); + Object[] values = new Object[columnIndexes.length]; + for (int i = 0; i < values.length; i++) { + if (columnIndexes[i] < 0) { + values[i] = kudfs[i]; + } else { + values[i] = genericRowValueTypeEnforcer.enforceFieldType(columnIndexes[i], row + .getColumns().get(columnIndexes[i])); + } + } + boolean result = (Boolean) ee.evaluate(values); + return result; + } catch (Exception e) { + log.error(e.getMessage(), e); + } + log.error("Invalid format: " + key + " : " + row); + return false; + } + }; + } + + private Predicate getWindowedKeyPredicate() throws Exception { + ExpressionUtil expressionUtil = new ExpressionUtil(); + ExpressionMetadata + expressionEvaluator = + expressionUtil.getExpressionEvaluator(filterExpression, schema); + return new Predicate, GenericRow>() { + @Override + public boolean test(Windowed key, GenericRow row) { + try { + Kudf[] kudfs = expressionEvaluator.getUdfs(); + Object[] values = new Object[columnIndexes.length]; + for (int i = 0; i < values.length; i++) { + if (columnIndexes[i] < 0) { + values[i] = kudfs[i]; + } else { + values[i] = genericRowValueTypeEnforcer.enforceFieldType(columnIndexes[i], row + .getColumns().get(columnIndexes[i])); + } + } + boolean result = (Boolean) ee.evaluate(values); + return result; + } catch (Exception e) { + log.error(e.getMessage(), e); + } + log.error("Invalid format: " + key + " : " + row); + return false; + } + }; + } + + public Expression getFilterExpression() { + return filterExpression; + } + + public Schema getSchema() { + return schema; + } + + public int[] getColumnIndexes() { + return columnIndexes; + } + + public boolean isWindowedKey() { + return isWindowedKey; + } + + public GenericRowValueTypeEnforcer getGenericRowValueTypeEnforcer() { + return genericRowValueTypeEnforcer; + } +} diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/DataSourceExtractor.java b/ksql-core/src/main/java/io/confluent/ksql/util/DataSourceExtractor.java new file mode 100644 index 000000000000..1ac2b328917c --- /dev/null +++ b/ksql-core/src/main/java/io/confluent/ksql/util/DataSourceExtractor.java @@ -0,0 +1,216 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.util; + +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.AstBuilder; +import io.confluent.ksql.parser.SqlBaseBaseVisitor; +import io.confluent.ksql.parser.SqlBaseParser; +import io.confluent.ksql.parser.tree.AliasedRelation; +import io.confluent.ksql.parser.tree.Node; +import io.confluent.ksql.parser.tree.NodeLocation; +import io.confluent.ksql.parser.tree.QualifiedName; +import io.confluent.ksql.parser.tree.Relation; +import io.confluent.ksql.parser.tree.Table; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.toList; + +public class DataSourceExtractor extends SqlBaseBaseVisitor { + + final MetaStore metaStore; + + Schema fromSchema; + Schema joinLeftSchema; + Schema joinRightSchema; + + String fromAlias; + String leftAlias; + String rightAlias; + + Set commonFieldNames = new HashSet<>(); + Set leftFieldNames = new HashSet<>(); + Set rightFieldNames = new HashSet<>(); + + boolean isJoin = false; + + public DataSourceExtractor(final MetaStore metaStore) { + + this.metaStore = metaStore; + } + + @Override + public Node visitQuerySpecification(final SqlBaseParser.QuerySpecificationContext ctx) { + Relation from = (Relation) visit(ctx.from); + return visitChildren(ctx); + } + + @Override + public Node visitTableName(final SqlBaseParser.TableNameContext context) { + return new Table(getLocation(context), getQualifiedName(context.qualifiedName())); + } + + @Override + public Node visitAliasedRelation(final SqlBaseParser.AliasedRelationContext context) { + Table table = (Table) visit(context.relationPrimary()); + + String alias = null; + if (context.children.size() == 1) { + alias = table.getName().getSuffix().toUpperCase(); + + } else if (context.children.size() == 2) { + alias = context.children.get(1).getText().toUpperCase(); + } + + if (!isJoin) { + this.fromAlias = alias; + StructuredDataSource + fromDataSource = + metaStore.getSource(table.getName().getSuffix()); + if (fromDataSource == null) { + throw new KsqlException(table.getName().getSuffix() + " does not exist."); + } + this.fromSchema = fromDataSource.getSchema(); + return null; + } + + // TODO: Figure out if the call to toUpperCase() here is really necessary + return new AliasedRelation(getLocation(context), table, alias.toUpperCase(), + getColumnAliases(context.columnAliases())); + + } + + @Override + public Node visitJoinRelation(final SqlBaseParser.JoinRelationContext context) { + this.isJoin = true; + AliasedRelation left = (AliasedRelation) visit(context.left); + AliasedRelation right; + if (context.CROSS() != null) { + right = (AliasedRelation) visit(context.right); + } else { + if (context.NATURAL() != null) { + right = (AliasedRelation) visit(context.right); + } else { + right = (AliasedRelation) visit(context.rightRelation); + } + } + + this.leftAlias = left.getAlias(); + StructuredDataSource + leftDataSource = + metaStore.getSource(((Table) left.getRelation()).getName().getSuffix()); + if (leftDataSource == null) { + throw new KsqlException(((Table) left.getRelation()).getName().getSuffix() + " does not " + + "exist."); + } + this.joinLeftSchema = 
+
+    this.rightAlias = right.getAlias();
+    StructuredDataSource rightDataSource =
+        metaStore.getSource(((Table) right.getRelation()).getName().getSuffix());
+    if (rightDataSource == null) {
+      throw new KsqlException(((Table) right.getRelation()).getName().getSuffix()
+                              + " does not exist.");
+    }
+    this.joinRightSchema = rightDataSource.getSchema();
+
+    return null;
+  }
+
+  public void extractDataSources(final ParseTree node) {
+    visit(node);
+    if (joinLeftSchema != null) {
+      for (Field field : joinLeftSchema.fields()) {
+        leftFieldNames.add(field.name());
+      }
+      for (Field field : joinRightSchema.fields()) {
+        rightFieldNames.add(field.name());
+        if (leftFieldNames.contains(field.name())) {
+          commonFieldNames.add(field.name());
+        }
+      }
+    }
+  }
+
+  public MetaStore getMetaStore() {
+    return metaStore;
+  }
+
+  public Schema getJoinLeftSchema() {
+    return joinLeftSchema;
+  }
+
+  public String getFromAlias() {
+    return fromAlias;
+  }
+
+  public String getLeftAlias() {
+    return leftAlias;
+  }
+
+  public String getRightAlias() {
+    return rightAlias;
+  }
+
+  public Set<String> getCommonFieldNames() {
+    return commonFieldNames;
+  }
+
+  public Set<String> getLeftFieldNames() {
+    return leftFieldNames;
+  }
+
+  public Set<String> getRightFieldNames() {
+    return rightFieldNames;
+  }
+
+  private static String unquote(String value, String quote) {
+    return value.substring(1, value.length() - 1)
+        .replace(quote + quote, quote);
+  }
+
+  private static QualifiedName getQualifiedName(SqlBaseParser.QualifiedNameContext context) {
+    List<String> parts = context
+        .identifier().stream()
+        .map(AstBuilder::getIdentifierText)
+        .collect(toList());
+
+    return QualifiedName.of(parts);
+  }
+
+  private static List<String> getColumnAliases(
+      SqlBaseParser.ColumnAliasesContext columnAliasesContext) {
+    if (columnAliasesContext == null) {
+      return null;
+    }
+
+    return columnAliasesContext
+        .identifier().stream()
+        .map(AstBuilder::getIdentifierText)
+        .collect(toList());
+  }
+
+  public static NodeLocation getLocation(ParserRuleContext parserRuleContext) {
+    requireNonNull(parserRuleContext, "parserRuleContext is null");
+    return getLocation(parserRuleContext.getStart());
+  }
+
+  public static NodeLocation getLocation(Token token) {
+    requireNonNull(token, "token is null");
+    return new NodeLocation(token.getLine(), token.getCharPositionInLine());
+  }
+}
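A hypothetical call sequence for this visitor (the parse tree and metastore are assumed to come from KsqlParser and the engine, respectively):

// Populate the extractor from a statement's parse tree, then query the aliases.
DataSourceExtractor extractor = new DataSourceExtractor(metaStore);
extractor.extractDataSources(parseTree);
String leftAlias = extractor.getLeftAlias();             // e.g. "O" for "... FROM orders O JOIN ..."
Set<String> ambiguous = extractor.getCommonFieldNames(); // column names present on both join sides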
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionMetadata.java b/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionMetadata.java
new file mode 100644
index 000000000000..fb42955bd284
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionMetadata.java
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.function.udf.Kudf;
+import org.apache.kafka.connect.data.Schema;
+import org.codehaus.commons.compiler.IExpressionEvaluator;
+
+public class ExpressionMetadata {
+
+  private final IExpressionEvaluator expressionEvaluator;
+  private final int[] indexes;
+  private final Kudf[] udfs;
+  private final Schema expressionType;
+
+  public ExpressionMetadata(IExpressionEvaluator expressionEvaluator, int[] indexes, Kudf[] udfs,
+                            Schema expressionType) {
+    this.expressionEvaluator = expressionEvaluator;
+    this.indexes = indexes;
+    this.udfs = udfs;
+    this.expressionType = expressionType;
+  }
+
+  public IExpressionEvaluator getExpressionEvaluator() {
+    return expressionEvaluator;
+  }
+
+  public int[] getIndexes() {
+    return indexes;
+  }
+
+  public Kudf[] getUdfs() {
+    return udfs;
+  }
+
+  public Schema getExpressionType() {
+    return expressionType;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionTypeManager.java b/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionTypeManager.java
new file mode 100644
index 000000000000..ea4a51b806b3
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionTypeManager.java
@@ -0,0 +1,197 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.function.KsqlFunctions;
+import io.confluent.ksql.function.KsqlAggregateFunction;
+import io.confluent.ksql.function.KsqlFunction;
+import io.confluent.ksql.parser.tree.ArithmeticBinaryExpression;
+import io.confluent.ksql.parser.tree.BooleanLiteral;
+import io.confluent.ksql.parser.tree.Cast;
+import io.confluent.ksql.parser.tree.ComparisonExpression;
+import io.confluent.ksql.parser.tree.DefaultAstVisitor;
+import io.confluent.ksql.parser.tree.DereferenceExpression;
+import io.confluent.ksql.parser.tree.DoubleLiteral;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.FunctionCall;
+import io.confluent.ksql.parser.tree.IsNotNullPredicate;
+import io.confluent.ksql.parser.tree.IsNullPredicate;
+import io.confluent.ksql.parser.tree.LikePredicate;
+import io.confluent.ksql.parser.tree.LongLiteral;
+import io.confluent.ksql.parser.tree.QualifiedNameReference;
+import io.confluent.ksql.parser.tree.StringLiteral;
+import io.confluent.ksql.parser.tree.SubscriptExpression;
+import io.confluent.ksql.planner.PlanException;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.Optional;
+
+public class ExpressionTypeManager
+    extends DefaultAstVisitor<Expression, ExpressionTypeManager.ExpressionTypeContext> {
+
+  private final Schema schema;
+
+  public ExpressionTypeManager(Schema schema) {
+    this.schema = schema;
+  }
+
+  public Schema getExpressionType(final Expression expression) {
+    ExpressionTypeContext expressionTypeContext = new ExpressionTypeContext();
+    process(expression, expressionTypeContext);
+    return expressionTypeContext.getSchema();
+  }
+
+  class ExpressionTypeContext {
+
+    Schema schema;
+
+    public Schema getSchema() {
+      return schema;
+    }
+
+    public void setSchema(Schema schema) {
+      this.schema = schema;
+    }
+  }
+
+  @Override
+  protected Expression visitArithmeticBinary(final ArithmeticBinaryExpression node,
+                                             final ExpressionTypeContext expressionTypeContext) {
+    process(node.getLeft(), expressionTypeContext);
+    Schema leftType = expressionTypeContext.getSchema();
+    process(node.getRight(), expressionTypeContext);
+    Schema rightType = expressionTypeContext.getSchema();
+    expressionTypeContext.setSchema(resolveArithmeticType(leftType, rightType));
+    return null;
+  }
+
+  protected Expression visitCast(final Cast node,
+                                 final ExpressionTypeContext expressionTypeContext) {
+    Schema castType = SchemaUtil.getTypeSchema(node.getType());
+    expressionTypeContext.setSchema(castType);
+    return null;
+  }
+
+  @Override
+  protected Expression visitComparisonExpression(
+      final ComparisonExpression node, final ExpressionTypeContext expressionTypeContext) {
+    expressionTypeContext.setSchema(Schema.BOOLEAN_SCHEMA);
+    return null;
+  }
+
+  @Override
+  protected Expression visitQualifiedNameReference(
+      final QualifiedNameReference node, final ExpressionTypeContext expressionTypeContext) {
+    Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, node.getName().getSuffix());
+    if (!schemaField.isPresent()) {
+      throw new KsqlException(String.format("Invalid Expression %s.", node.toString()));
+    }
+    expressionTypeContext.setSchema(schemaField.get().schema());
+    return null;
+  }
+
+  @Override
+  protected Expression visitDereferenceExpression(
+      final DereferenceExpression node, final ExpressionTypeContext expressionTypeContext) {
+    Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, node.toString());
+    if (!schemaField.isPresent()) {
+      throw new KsqlException(String.format("Invalid Expression %s.", node.toString()));
+    }
+    expressionTypeContext.setSchema(schemaField.get().schema());
+    return null;
+  }
+
+  protected Expression visitStringLiteral(final StringLiteral node,
+                                          final ExpressionTypeContext expressionTypeContext) {
+    expressionTypeContext.setSchema(Schema.STRING_SCHEMA);
+    return null;
+  }
+
+  protected Expression visitBooleanLiteral(final BooleanLiteral node,
+                                           final ExpressionTypeContext expressionTypeContext) {
+    expressionTypeContext.setSchema(Schema.BOOLEAN_SCHEMA);
+    return null;
+  }
+
+  protected Expression visitLongLiteral(final LongLiteral node,
+                                        final ExpressionTypeContext expressionTypeContext) {
+    expressionTypeContext.setSchema(Schema.INT64_SCHEMA);
+    return null;
+  }
+
+  protected Expression visitDoubleLiteral(final DoubleLiteral node,
+                                          final ExpressionTypeContext expressionTypeContext) {
+    expressionTypeContext.setSchema(Schema.FLOAT64_SCHEMA);
+    return null;
+  }
+
+  protected Expression visitLikePredicate(LikePredicate node,
+                                          ExpressionTypeContext expressionTypeContext) {
+    expressionTypeContext.setSchema(Schema.BOOLEAN_SCHEMA);
+    return null;
+  }
+
+  protected Expression visitIsNotNullPredicate(IsNotNullPredicate node,
+                                               ExpressionTypeContext expressionTypeContext) {
+    expressionTypeContext.setSchema(Schema.BOOLEAN_SCHEMA);
+    return null;
+  }
+
+  protected Expression visitIsNullPredicate(IsNullPredicate node,
+                                            ExpressionTypeContext expressionTypeContext) {
+    expressionTypeContext.setSchema(Schema.BOOLEAN_SCHEMA);
+    return null;
+  }
+
+  protected Expression visitSubscriptExpression(
+      final SubscriptExpression node, final ExpressionTypeContext expressionTypeContext) {
+    String arrayBaseName = node.getBase().toString();
+    Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, arrayBaseName);
+    if (!schemaField.isPresent()) {
+      throw new KsqlException(String.format("Invalid Expression %s.", node.toString()));
+    }
+    expressionTypeContext.setSchema(schemaField.get().schema().valueSchema());
+    return null;
+  }
+
+  protected Expression visitFunctionCall(final FunctionCall node,
+                                         final ExpressionTypeContext expressionTypeContext) {
+    KsqlFunction ksqlFunction = KsqlFunctions.getFunction(node.getName().getSuffix());
+    if (ksqlFunction != null) {
+      expressionTypeContext.setSchema(ksqlFunction.getReturnType());
+    } else if (KsqlFunctions.isAnAggregateFunction(node.getName().getSuffix())) {
+      KsqlAggregateFunction ksqlAggregateFunction =
+          KsqlFunctions.getAggregateFunction(
+              node.getName().getSuffix(), node.getArguments(), schema);
+      expressionTypeContext.setSchema(ksqlAggregateFunction.getReturnType());
+    } else {
+      throw new KsqlException("Unknown function: " + node.getName().toString());
+    }
+    return null;
+  }
+
+  private Schema resolveArithmeticType(final Schema leftSchema,
+                                       final Schema rightSchema) {
+    if (leftSchema == rightSchema) {
+      return leftSchema;
+    } else if ((leftSchema == Schema.STRING_SCHEMA) || (rightSchema == Schema.STRING_SCHEMA)) {
+      throw new PlanException("Incompatible types.");
+    } else if ((leftSchema == Schema.BOOLEAN_SCHEMA) || (rightSchema == Schema.BOOLEAN_SCHEMA)) {
+      throw new PlanException("Incompatible types.");
+    } else if ((leftSchema == Schema.FLOAT64_SCHEMA) || (rightSchema == Schema.FLOAT64_SCHEMA)) {
+      return Schema.FLOAT64_SCHEMA;
+    } else if ((leftSchema == Schema.INT64_SCHEMA) || (rightSchema == Schema.INT64_SCHEMA)) {
+      return Schema.INT64_SCHEMA;
+    } else if ((leftSchema == Schema.INT32_SCHEMA) || (rightSchema == Schema.INT32_SCHEMA)) {
+      return Schema.INT32_SCHEMA;
+    }
+    throw new PlanException("Unsupported types.");
+  }
+}
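A short sketch of what the resolver above does for a mixed-type expression (schema and expression are assumed to be in scope, as elsewhere in this file):

// For "col1 + col2" with col1 INT64 and col2 FLOAT64, the arithmetic visitor
// widens the result to FLOAT64; STRING or BOOLEAN operands are rejected.
ExpressionTypeManager typeManager = new ExpressionTypeManager(schema);
Schema resultType = typeManager.getExpressionType(expression);
assert resultType == Schema.FLOAT64_SCHEMA;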
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionUtil.java b/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionUtil.java
new file mode 100644
index 000000000000..cdc665e989e5
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/ExpressionUtil.java
@@ -0,0 +1,190 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.function.KsqlFunction;
+import io.confluent.ksql.function.KsqlFunctions;
+import io.confluent.ksql.function.udf.Kudf;
+import io.confluent.ksql.parser.tree.ArithmeticBinaryExpression;
+import io.confluent.ksql.parser.tree.AstVisitor;
+import io.confluent.ksql.parser.tree.Cast;
+import io.confluent.ksql.parser.tree.ComparisonExpression;
+import io.confluent.ksql.parser.tree.DereferenceExpression;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.FunctionCall;
+import io.confluent.ksql.parser.tree.IsNotNullPredicate;
+import io.confluent.ksql.parser.tree.IsNullPredicate;
+import io.confluent.ksql.parser.tree.LikePredicate;
+import io.confluent.ksql.parser.tree.LogicalBinaryExpression;
+import io.confluent.ksql.parser.tree.NotExpression;
+import io.confluent.ksql.parser.tree.QualifiedNameReference;
+import io.confluent.ksql.parser.tree.SubscriptExpression;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.codehaus.commons.compiler.CompilerFactoryFactory;
+import org.codehaus.commons.compiler.IExpressionEvaluator;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+public class ExpressionUtil {
+
+  public Map<String, Class> getParameterInfo(final Expression expression, final Schema schema) {
+    Visitor visitor = new Visitor(schema);
+    visitor.process(expression, null);
+    return visitor.parameterMap;
+  }
+
+  public ExpressionMetadata getExpressionEvaluator(
+      final Expression expression,
+      final Schema schema) throws Exception {
+    Map<String, Class> parameterMap = getParameterInfo(expression, schema);
+
+    String[] parameterNames = new String[parameterMap.size()];
+    Class[] parameterTypes = new Class[parameterMap.size()];
+    int[] columnIndexes = new int[parameterMap.size()];
+    Kudf[] kudfObjects = new Kudf[parameterMap.size()];
+
+    int index = 0;
+    for (String parameterName : parameterMap.keySet()) {
+      parameterNames[index] = parameterName;
+      parameterTypes[index] = parameterMap.get(parameterName);
+      columnIndexes[index] = SchemaUtil.getFieldIndexByName(schema, parameterName);
+      if (columnIndexes[index] < 0) {
+        kudfObjects[index] = (Kudf) parameterMap.get(parameterName).newInstance();
+      } else {
+        kudfObjects[index] = null;
+      }
+      index++;
+    }
+
+    String expressionStr = expression.getCodegenString(schema);
+    IExpressionEvaluator ee =
+        CompilerFactoryFactory.getDefaultCompilerFactory().newExpressionEvaluator();
+
+    // Bind one parameter per referenced column or UDF.
+    ee.setParameters(parameterNames, parameterTypes);
+
+    // The expression's result type is derived from the schema.
+    ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(schema);
+    Schema expressionType = expressionTypeManager.getExpressionType(expression);
+
+    ee.setExpressionType(SchemaUtil.getJavaType(expressionType));
+
+    // "Cook" (scan, parse, compile and load) the expression.
+    ee.cook(expressionStr);
+
+    return new ExpressionMetadata(ee, columnIndexes, kudfObjects, expressionType);
+  }
+
+  private class Visitor extends AstVisitor<Object, Object> {
+
+    final Schema schema;
+    final Map<String, Class> parameterMap;
+
+    Visitor(Schema schema) {
+      this.schema = schema;
+      this.parameterMap = new HashMap<>();
+    }
+
+    protected Object visitLikePredicate(LikePredicate node, Object context) {
+      process(node.getValue(), null);
+      return null;
+    }
+
+    protected Object visitFunctionCall(FunctionCall node, Object context) {
+      String functionName = node.getName().getSuffix();
+      KsqlFunction ksqlFunction = KsqlFunctions.getFunction(functionName);
+      parameterMap.put(functionName, ksqlFunction.getKudfClass());
+      for (Expression argExpr : node.getArguments()) {
+        process(argExpr, null);
+      }
+      return null;
+    }
+
+    protected Object visitArithmeticBinary(ArithmeticBinaryExpression node, Object context) {
+      process(node.getLeft(), null);
+      process(node.getRight(), null);
+      return null;
+    }
+
+    protected Object visitIsNotNullPredicate(IsNotNullPredicate node, Object context) {
+      return process(node.getValue(), context);
+    }
+
+    protected Object visitIsNullPredicate(IsNullPredicate node, Object context) {
+      return process(node.getValue(), context);
+    }
+
+    protected Object visitLogicalBinaryExpression(LogicalBinaryExpression node, Object context) {
+      process(node.getLeft(), null);
+      process(node.getRight(), null);
+      return null;
+    }
+
+    @Override
+    protected Object visitComparisonExpression(ComparisonExpression node, Object context) {
+      process(node.getLeft(), null);
+      process(node.getRight(), null);
+      return null;
+    }
+
+    @Override
+    protected Object visitNotExpression(NotExpression node, Object context) {
+      return process(node.getValue(), null);
+    }
+
+    @Override
+    protected Object visitDereferenceExpression(DereferenceExpression node, Object context) {
+      Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, node.toString());
+      if (!schemaField.isPresent()) {
+        throw new RuntimeException(
+            "Cannot find the select field in the available fields: " + node.toString());
+      }
+      parameterMap.put(schemaField.get().name().replace(".", "_"),
+                       SchemaUtil.getJavaType(schemaField.get().schema()));
+      return null;
+    }
+
+    @Override
+    protected Object visitCast(Cast node, Object context) {
+      process(node.getExpression(), context);
+      return null;
+    }
+
+    @Override
+    protected Object visitSubscriptExpression(SubscriptExpression node, Object context) {
+      String arrayBaseName = node.getBase().toString();
+      Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, arrayBaseName);
+      if (!schemaField.isPresent()) {
+        throw new RuntimeException(
+            "Cannot find the select field in the available fields: " + arrayBaseName);
+      }
+      parameterMap.put(schemaField.get().name().replace(".", "_"),
+                       SchemaUtil.getJavaType(schemaField.get().schema()));
+      process(node.getIndex(), context);
+      return null;
+    }
+
+    @Override
+    protected Object visitQualifiedNameReference(QualifiedNameReference node, Object context) {
+      Optional<Field> schemaField = SchemaUtil.getFieldByName(schema, node.getName().getSuffix());
+      if (!schemaField.isPresent()) {
+        throw new RuntimeException(
+            "Cannot find the select field in the available fields: "
+            + node.getName().getSuffix());
+      }
+      parameterMap.put(schemaField.get().name().replace(".", "_"),
+                       SchemaUtil.getJavaType(schemaField.get().schema()));
+      return null;
+    }
+  }
+}
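Putting the pieces together, a hypothetical caller compiles once and evaluates per row (expression, schema and row are assumed to be in scope; the argument-marshalling convention mirrors the predicate code earlier in this patch):

ExpressionMetadata metadata = new ExpressionUtil().getExpressionEvaluator(expression, schema);
Object[] args = new Object[metadata.getIndexes().length];
for (int i = 0; i < args.length; i++) {
  // A negative index marks a UDF slot; otherwise it is a column position in the row.
  args[i] = metadata.getIndexes()[i] < 0
      ? metadata.getUdfs()[i]
      : row.getColumns().get(metadata.getIndexes()[i]);
}
Object value = metadata.getExpressionEvaluator().evaluate(args);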
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/GenericRowValueTypeEnforcer.java b/ksql-core/src/main/java/io/confluent/ksql/util/GenericRowValueTypeEnforcer.java
new file mode 100644
index 000000000000..4f2a3567a4f6
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/GenericRowValueTypeEnforcer.java
@@ -0,0 +1,133 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.List;
+
+public class GenericRowValueTypeEnforcer {
+
+  private final Schema schema;
+  private final List<Field> fields;
+
+  public GenericRowValueTypeEnforcer(final Schema schema) {
+    this.schema = schema;
+    this.fields = schema.fields();
+  }
+
+  public Object enforceFieldType(final int index, final Object value) {
+    Field field = fields.get(index);
+    return enforceFieldType(field.schema(), value);
+  }
+
+  public Object enforceFieldType(Schema schema, final Object value) {
+    if (schema == Schema.FLOAT64_SCHEMA) {
+      return enforceDouble(value);
+    } else if (schema == Schema.INT64_SCHEMA) {
+      return enforceLong(value);
+    } else if (schema == Schema.INT32_SCHEMA) {
+      return enforceInteger(value);
+    } else if (schema == Schema.STRING_SCHEMA) {
+      return enforceString(value);
+    } else if (schema == Schema.BOOLEAN_SCHEMA) {
+      return enforceBoolean(value);
+    } else if (schema.type() == Schema.Type.ARRAY) {
+      return value;
+    } else if (schema.type() == Schema.Type.MAP) {
+      return value;
+    } else {
+      throw new KsqlException("Type is not supported: " + schema);
+    }
+  }
+
+  Double enforceDouble(final Object value) {
+    if (value instanceof Double) {
+      return (Double) value;
+    } else if (value instanceof Integer) {
+      return ((Integer) value).doubleValue();
+    } else if (value instanceof Long) {
+      return ((Long) value).doubleValue();
+    } else if (value instanceof Float) {
+      return ((Float) value).doubleValue();
+    } else if (value instanceof Short) {
+      return ((Short) value).doubleValue();
+    } else if (value instanceof Byte) {
+      return ((Byte) value).doubleValue();
+    } else if (value instanceof String || value instanceof CharSequence) {
+      return Double.parseDouble(value.toString());
+    } else if (value == null) {
+      return null;
+    } else {
+      throw new KsqlException("Invalid field type. Value must be Double.");
+    }
+  }
+
+  Long enforceLong(final Object value) {
+    if (value instanceof Long) {
+      return (Long) value;
+    } else if (value instanceof Integer) {
+      return ((Integer) value).longValue();
+    } else if (value instanceof Float) {
+      return ((Float) value).longValue();
+    } else if (value instanceof Short) {
+      return ((Short) value).longValue();
+    } else if (value instanceof Byte) {
+      return ((Byte) value).longValue();
+    } else if (value instanceof String || value instanceof CharSequence) {
+      return Long.parseLong(value.toString());
+    } else if (value == null) {
+      return null;
+    } else {
+      throw new KsqlException("Invalid field type. Value must be Long.");
+    }
+  }
+
+  Integer enforceInteger(final Object value) {
+    if (value instanceof Integer) {
+      return (Integer) value;
+    } else if (value instanceof Long) {
+      return ((Long) value).intValue();
+    } else if (value instanceof Float) {
+      return ((Float) value).intValue();
+    } else if (value instanceof Short) {
+      return ((Short) value).intValue();
+    } else if (value instanceof Byte) {
+      return ((Byte) value).intValue();
+    } else if (value instanceof String || value instanceof CharSequence) {
+      return Integer.parseInt(value.toString());
+    } else if (value == null) {
+      return null;
+    } else {
+      throw new KsqlException("Invalid field type. Value must be Integer.");
+    }
+  }
+
+  String enforceString(final Object value) {
+    if (value instanceof String || value instanceof CharSequence) {
+      return value.toString();
+    } else if (value == null) {
+      return null;
+    } else {
+      throw new KsqlException("Invalid field type. Value must be String.");
+    }
+  }
+
+  Boolean enforceBoolean(final Object value) {
+    if (value instanceof Boolean) {
+      return (Boolean) value;
+    } else if (value instanceof String) {
+      return Boolean.parseBoolean(value.toString());
+    } else if (value == null) {
+      return null;
+    } else {
+      throw new KsqlException("Invalid field type. Value must be Boolean.");
+    }
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/KafkaTopicClient.java b/ksql-core/src/main/java/io/confluent/ksql/util/KafkaTopicClient.java
new file mode 100644
index 000000000000..3564ea6cf9fe
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/KafkaTopicClient.java
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.exception.KafkaResponseGetFailedException;
+import io.confluent.ksql.exception.KafkaTopicException;
+import org.apache.kafka.clients.admin.TopicDescription;
+
+import java.io.Closeable;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+public interface KafkaTopicClient extends Closeable {
+
+  /**
+   * Create a new topic with the specified name, numPartitions and replicationFactor.
+   * [warn] synchronous call to get the response
+   *
+   * @param topic name of the topic to create
+   * @param numPartitions number of partitions for the new topic
+   * @param replicationFactor replication factor for the new topic
+   * @throws KafkaTopicException
+   * @throws KafkaResponseGetFailedException
+   */
+  void createTopic(String topic, int numPartitions, short replicationFactor);
+
+  /**
+   * [warn] synchronous call to get the response
+   *
+   * @param topic name of the topic
+   * @return whether the topic exists or not
+   * @throws KafkaResponseGetFailedException
+   */
+  boolean isTopicExists(String topic);
+
+  /**
+   * [warn] synchronous call to get the response
+   *
+   * @return set of existing topic names
+   * @throws KafkaResponseGetFailedException
+   */
+  Set<String> listTopicNames();
+
+  /**
+   * [warn] synchronous call to get the response
+   *
+   * @param topicNames topicNames to describe
+   * @return topic descriptions keyed by topic name
+   * @throws KafkaResponseGetFailedException
+   */
+  Map<String, TopicDescription> describeTopics(Collection<String> topicNames);
+
+  /**
+   * Close the underlying Kafka admin client.
+   */
+  void close();
+}
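Because the interface extends Closeable, callers can use try-with-resources; a minimal sketch against the implementation that follows (the topic name, sizing, and the ksqlConfig variable are illustrative):

try (KafkaTopicClient client = new KafkaTopicClientImpl(ksqlConfig)) {
  if (!client.isTopicExists("pageviews")) {
    client.createTopic("pageviews", 4, (short) 1);  // 4 partitions, replication factor 1
  }
}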
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/KafkaTopicClientImpl.java b/ksql-core/src/main/java/io/confluent/ksql/util/KafkaTopicClientImpl.java
new file mode 100644
index 000000000000..573d486dbb6c
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/KafkaTopicClientImpl.java
@@ -0,0 +1,82 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.exception.KafkaResponseGetFailedException;
+import io.confluent.ksql.exception.KafkaTopicException;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+public class KafkaTopicClientImpl implements KafkaTopicClient {
+  private static final Logger log = LoggerFactory.getLogger(KafkaTopicClientImpl.class);
+  private final KsqlConfig ksqlConfig;
+
+  public KafkaTopicClientImpl(final KsqlConfig ksqlConfig) {
+    this.ksqlConfig = ksqlConfig.clone();
+  }
+
+  public void createTopic(String topic, int numPartitions, short replicationFactor) {
+    log.info("Creating topic '{}'", topic);
+    if (isTopicExists(topic)) {
+      Map<String, TopicDescription> topicDescriptions = describeTopics(Arrays.asList(topic));
+      TopicDescription topicDescription = topicDescriptions.get(topic);
+      if (topicDescription.partitions().size() != numPartitions
+          || topicDescription.partitions().get(0).replicas().size() != replicationFactor) {
+        throw new KafkaTopicException(String.format(
+            "Topic '%s' does not conform to the requirements. Partitions: %d vs. %d. "
+            + "Replication: %d vs. %d", topic,
+            topicDescription.partitions().size(), numPartitions,
+            topicDescription.partitions().get(0).replicas().size(), replicationFactor
+        ));
+      }
+      // A topic with the requested partitions and replicas exists; reuse it.
+      return;
+    }
+    NewTopic newTopic = new NewTopic(topic, numPartitions, replicationFactor);
+    try (AdminClient adminClient = AdminClient.create(ksqlConfig.getKsqlConfigProps())) {
+      adminClient.createTopics(Collections.singleton(newTopic)).all().get();
+    } catch (InterruptedException | ExecutionException e) {
+      throw new KafkaResponseGetFailedException("Failed to guarantee existence of topic "
+                                                + topic, e);
+    }
+  }
+
+  public boolean isTopicExists(String topic) {
+    log.debug("Checking for existence of topic '{}'", topic);
+    return listTopicNames().contains(topic);
+  }
+
+  public Set<String> listTopicNames() {
+    try (AdminClient adminClient = AdminClient.create(ksqlConfig.getKsqlConfigProps())) {
+      return adminClient.listTopics().names().get();
+    } catch (InterruptedException | ExecutionException e) {
+      throw new KafkaResponseGetFailedException("Failed to retrieve kafka topic names", e);
+    }
+  }
+
+  public Map<String, TopicDescription> describeTopics(Collection<String> topicNames) {
+    try (AdminClient adminClient = AdminClient.create(ksqlConfig.getKsqlConfigProps())) {
+      return adminClient.describeTopics(topicNames).all().get();
+    } catch (InterruptedException | ExecutionException e) {
+      throw new KafkaResponseGetFailedException("Failed to describe kafka topics", e);
+    }
+  }
+
+  public void close() {
+  }
+}
\ No newline at end of file
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/KsqlConfig.java b/ksql-core/src/main/java/io/confluent/ksql/util/KsqlConfig.java
new file mode 100644
index 000000000000..6cc5db64c1c3
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/KsqlConfig.java
@@ -0,0 +1,164 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.streams.StreamsConfig;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class KsqlConfig extends AbstractConfig {
+
+  public static final String KSQL_TIMESTAMP_COLUMN_INDEX = "ksq.timestamp.column.index";
+  public static final String SINK_TIMESTAMP_COLUMN_NAME = "TIMESTAMP";
+
+  public static final String SINK_NUMBER_OF_PARTITIONS = "PARTITIONS";
+  public static final String DEFAULT_SINK_NUMBER_OF_PARTITIONS = "ksql.sink.partitions.default";
+  public static final String SINK_NUMBER_OF_REPLICATIONS = "REPLICATIONS";
+  public static final String DEFAULT_SINK_NUMBER_OF_REPLICATIONS =
+      "ksql.sink.replications.default";
+  public static final String SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION =
+      "WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION";
+  public static final String DEFAULT_SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION =
+      "ksql.sink.window.change.log.additional.retention.default";
+
+  public static final String KSQL_SERVICE_ID_CONFIG = "ksql.service.id";
+  public static final ConfigDef.Type KSQL_SERVICE_ID_TYPE = ConfigDef.Type.STRING;
+  public static final String KSQL_SERVICE_ID_DEFAULT = "ksql_";
+  public static final ConfigDef.Importance KSQL_SERVICE_ID_IMPORTANCE =
+      ConfigDef.Importance.MEDIUM;
+  public static final String KSQL_SERVICE_ID_DOC =
+      "Indicates the ID of the KSQL service. It will be used as a prefix for all KSQL queries "
+      + "in this service.";
+
+  public static final String KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG = "ksql.persistent.prefix";
+  public static final ConfigDef.Type KSQL_PERSISTENT_QUERY_NAME_PREFIX_TYPE =
+      ConfigDef.Type.STRING;
+  public static final String KSQL_PERSISTENT_QUERY_NAME_PREFIX_DEFAULT = "query_";
+  public static final ConfigDef.Importance KSQL_PERSISTENT_QUERY_NAME_PREFIX_IMPORTANCE =
+      ConfigDef.Importance.MEDIUM;
+  public static final String KSQL_PERSISTENT_QUERY_NAME_PREFIX_DOC =
+      "Second part of the prefix for persistent queries.";
+
+  public static final String KSQL_TRANSIENT_QUERY_NAME_PREFIX_CONFIG = "ksql.transient.prefix";
+  public static final ConfigDef.Type KSQL_TRANSIENT_QUERY_NAME_PREFIX_TYPE =
+      ConfigDef.Type.STRING;
+  public static final String KSQL_TRANSIENT_QUERY_NAME_PREFIX_DEFAULT = "transient_";
+  public static final ConfigDef.Importance KSQL_TRANSIENT_QUERY_NAME_PREFIX_IMPORTANCE =
+      ConfigDef.Importance.MEDIUM;
+  public static final String KSQL_TRANSIENT_QUERY_NAME_PREFIX_DOC =
+      "Second part of the prefix for transient queries.";
+
+  public static final String KSQL_TABLE_STATESTORE_NAME_SUFFIX_CONFIG = "ksql.statestore.suffix";
+  public static final ConfigDef.Type KSQL_TABLE_STATESTORE_NAME_SUFFIX_TYPE =
+      ConfigDef.Type.STRING;
+  public static final String KSQL_TABLE_STATESTORE_NAME_SUFFIX_DEFAULT = "transient_";
+  public static final ConfigDef.Importance KSQL_TABLE_STATESTORE_NAME_SUFFIX_IMPORTANCE =
+      ConfigDef.Importance.MEDIUM;
+  public static final String KSQL_TABLE_STATESTORE_NAME_SUFFIX_DOC =
+      "Suffix for state store names in Tables.";
+
+  public int defaultSinkNumberOfPartitions = 4;
+  public short defaultSinkNumberOfReplications = 1;
+  // TODO: Find out the best default value.
+  public long defaultSinkWindowChangeLogAdditionalRetention = 1000000;
+
+  public String defaultAutoOffsetResetConfig = "latest";
+  public long defaultCommitIntervalMsConfig = 2000;
+  public long defaultCacheMaxBytesBufferingConfig = 10000000;
+  public int defaultNumberOfStreamsThreads = 4;
+
+  Map<String, Object> ksqlConfigProps;
+
+  private static final ConfigDef CONFIG_DEF = new ConfigDef(StreamsConfig.configDef());
+
+  public KsqlConfig(Map<?, ?> props) {
+    super(CONFIG_DEF, props);
+
+    ksqlConfigProps = new HashMap<>();
+    ksqlConfigProps.put(KSQL_SERVICE_ID_CONFIG, KSQL_SERVICE_ID_DEFAULT);
+    ksqlConfigProps.put(KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG,
+                        KSQL_PERSISTENT_QUERY_NAME_PREFIX_DEFAULT);
+    ksqlConfigProps.put(KSQL_TRANSIENT_QUERY_NAME_PREFIX_CONFIG,
+                        KSQL_TRANSIENT_QUERY_NAME_PREFIX_DEFAULT);
+    ksqlConfigProps.put(KSQL_TABLE_STATESTORE_NAME_SUFFIX_CONFIG,
+                        KSQL_TABLE_STATESTORE_NAME_SUFFIX_DEFAULT);
+
+    if (props.containsKey(DEFAULT_SINK_NUMBER_OF_PARTITIONS)) {
+      ksqlConfigProps.put(SINK_NUMBER_OF_PARTITIONS,
+          Integer.parseInt(props.get(DEFAULT_SINK_NUMBER_OF_PARTITIONS).toString()));
+    } else {
+      ksqlConfigProps.put(SINK_NUMBER_OF_PARTITIONS, defaultSinkNumberOfPartitions);
+    }
+
+    if (props.containsKey(DEFAULT_SINK_NUMBER_OF_REPLICATIONS)) {
+      ksqlConfigProps.put(SINK_NUMBER_OF_REPLICATIONS,
+          Short.parseShort(props.get(DEFAULT_SINK_NUMBER_OF_REPLICATIONS).toString()));
+    } else {
+      ksqlConfigProps.put(SINK_NUMBER_OF_REPLICATIONS, defaultSinkNumberOfReplications);
+    }
+
+    if (props.containsKey(DEFAULT_SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION)) {
+      ksqlConfigProps.put(SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION,
+          Long.parseLong(
+              props.get(DEFAULT_SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION).toString()));
+    } else {
+      ksqlConfigProps.put(SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION,
+                          defaultSinkWindowChangeLogAdditionalRetention);
+    }
+
+    ksqlConfigProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, defaultAutoOffsetResetConfig);
+    ksqlConfigProps.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, defaultCommitIntervalMsConfig);
+    ksqlConfigProps.put(
+        StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, defaultCacheMaxBytesBufferingConfig);
+    ksqlConfigProps.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, defaultNumberOfStreamsThreads);
+
+    for (Object propKey : props.keySet()) {
+      ksqlConfigProps.put(propKey.toString(), props.get(propKey));
+    }
+  }
+
+  protected KsqlConfig(ConfigDef config, Map<?, ?> props) {
+    super(config, props);
+  }
+
+  @SuppressWarnings("unchecked")
+  public Map<String, Object> getStreamsProperties() {
+    return (Map<String, Object>) new StreamsConfig(originals()).values();
+  }
+
+  public Map<String, Object> getKsqlConfigProps() {
+    return ksqlConfigProps;
+  }
+
+  public Object get(String propertyName) {
+    return ksqlConfigProps.get(propertyName);
+  }
+
+  public void put(String propertyName, Object propertyValue) {
+    ksqlConfigProps.put(propertyName, propertyValue);
+  }
+
+  public KsqlConfig clone() {
+    return new KsqlConfig(this.ksqlConfigProps);
+  }
+}
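A minimal construction sketch (the broker address and property values are illustrative; APPLICATION_ID and BOOTSTRAP_SERVERS are required because the definition above reuses StreamsConfig's ConfigDef):

Map<String, Object> props = new HashMap<>();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "ksql-example");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(KsqlConfig.DEFAULT_SINK_NUMBER_OF_PARTITIONS, "8");
KsqlConfig config = new KsqlConfig(props);
Object sinkPartitions = config.get(KsqlConfig.SINK_NUMBER_OF_PARTITIONS); // Integer 8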
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/KsqlException.java b/ksql-core/src/main/java/io/confluent/ksql/util/KsqlException.java
new file mode 100644
index 000000000000..b335b5749fc3
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/KsqlException.java
@@ -0,0 +1,18 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import org.apache.kafka.streams.errors.StreamsException;
+
+public class KsqlException extends StreamsException {
+
+  public KsqlException(String message) {
+    super(message);
+  }
+
+  public KsqlException(String message, Throwable throwable) {
+    super(message, throwable);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/KsqlPreconditions.java b/ksql-core/src/main/java/io/confluent/ksql/util/KsqlPreconditions.java
new file mode 100644
index 000000000000..081cf2588186
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/KsqlPreconditions.java
@@ -0,0 +1,25 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import javax.annotation.Nullable;
+
+public class KsqlPreconditions {
+
+  public static <T> T checkNotNull(T reference, @Nullable Object errorMessage) {
+    if (reference == null) {
+      throw new KsqlException(String.valueOf(errorMessage));
+    }
+    return reference;
+  }
+
+  public static void checkArgument(boolean expression, @Nullable Object errorMessage) {
+    if (!expression) {
+      throw new KsqlException(String.valueOf(errorMessage));
+    }
+  }
+}
\ No newline at end of file
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/Pair.java b/ksql-core/src/main/java/io/confluent/ksql/util/Pair.java
new file mode 100644
index 000000000000..5ce727a63e82
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/Pair.java
@@ -0,0 +1,24 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+public class Pair<T1, T2> {
+
+  public final T1 left;
+  public final T2 right;
+
+  public Pair(T1 left, T2 right) {
+    this.left = left;
+    this.right = right;
+  }
+
+  public T1 getLeft() {
+    return left;
+  }
+
+  public T2 getRight() {
+    return right;
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java b/ksql-core/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java
new file mode 100644
index 000000000000..724c156361fd
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.metastore.DataSource;
+import io.confluent.ksql.planner.plan.OutputNode;
+import org.apache.kafka.streams.KafkaStreams;
+
+import java.util.Objects;
+
+public class PersistentQueryMetadata extends QueryMetadata {
+
+  private final long id;
+
+  public PersistentQueryMetadata(String statementString, KafkaStreams kafkaStreams,
+                                 OutputNode outputNode, String executionPlan, long id,
+                                 DataSource.DataSourceType dataSourceType) {
+    super(statementString, kafkaStreams, outputNode, executionPlan, dataSourceType);
+    this.id = id;
+  }
+
+  public long getId() {
+    return id;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof PersistentQueryMetadata)) {
+      return false;
+    }
+
+    PersistentQueryMetadata that = (PersistentQueryMetadata) o;
+
+    return Objects.equals(this.id, that.id) && super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(id, super.hashCode());
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/QueryMetadata.java b/ksql-core/src/main/java/io/confluent/ksql/util/QueryMetadata.java
new file mode 100644
index 000000000000..72a0fa5c799d
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/QueryMetadata.java
@@ -0,0 +1,67 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.metastore.DataSource;
+import io.confluent.ksql.planner.plan.OutputNode;
+import org.apache.kafka.streams.KafkaStreams;
+
+import java.util.Objects;
+
+public class QueryMetadata {
+  private final String statementString;
+  private final KafkaStreams kafkaStreams;
+  private final OutputNode outputNode;
+  private final String executionPlan;
+  private final DataSource.DataSourceType dataSourceType;
+
+  public QueryMetadata(String statementString, KafkaStreams kafkaStreams, OutputNode outputNode,
+                       String executionPlan,
+                       DataSource.DataSourceType dataSourceType) {
+    this.statementString = statementString;
+    this.kafkaStreams = kafkaStreams;
+    this.outputNode = outputNode;
+    this.executionPlan = executionPlan;
+    this.dataSourceType = dataSourceType;
+  }
+
+  public String getStatementString() {
+    return statementString;
+  }
+
+  public KafkaStreams getKafkaStreams() {
+    return kafkaStreams;
+  }
+
+  public OutputNode getOutputNode() {
+    return outputNode;
+  }
+
+  public String getExecutionPlan() {
+    return executionPlan;
+  }
+
+  public DataSource.DataSourceType getDataSourceType() {
+    return dataSourceType;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof QueryMetadata)) {
+      return false;
+    }
+
+    QueryMetadata that = (QueryMetadata) o;
+
+    return Objects.equals(this.statementString, that.statementString)
+           && Objects.equals(this.kafkaStreams, that.kafkaStreams)
+           && Objects.equals(this.outputNode, that.outputNode);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(kafkaStreams, outputNode);
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/QueuedQueryMetadata.java b/ksql-core/src/main/java/io/confluent/ksql/util/QueuedQueryMetadata.java
new file mode 100644
index 000000000000..f7dd65eab0be
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/QueuedQueryMetadata.java
@@ -0,0 +1,51 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.metastore.DataSource;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.planner.plan.OutputNode;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.KeyValue;
+
+import java.util.Objects;
+import java.util.concurrent.SynchronousQueue;
+
+public class QueuedQueryMetadata extends QueryMetadata {
+
+  private final SynchronousQueue<KeyValue<String, GenericRow>> rowQueue;
+
+  public QueuedQueryMetadata(
+      String statementString,
+      KafkaStreams kafkaStreams,
+      OutputNode outputNode,
+      String executionPlan,
+      SynchronousQueue<KeyValue<String, GenericRow>> rowQueue,
+      DataSource.DataSourceType dataSourceType
+  ) {
+    super(statementString, kafkaStreams, outputNode, executionPlan, dataSourceType);
+    this.rowQueue = rowQueue;
+  }
+
+  public SynchronousQueue<KeyValue<String, GenericRow>> getRowQueue() {
+    return rowQueue;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof QueuedQueryMetadata)) {
+      return false;
+    }
+
+    QueuedQueryMetadata that = (QueuedQueryMetadata) o;
+
+    return Objects.equals(this.rowQueue, that.rowQueue) && super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(rowQueue, super.hashCode());
+  }
+}
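Rows from a transient query arrive on the SynchronousQueue above; a hypothetical consumer loop (the query object is assumed to come from the engine):

static void printRows(QueuedQueryMetadata query) throws InterruptedException {
  while (true) {
    // take() blocks until the running topology hands over the next row.
    KeyValue<String, GenericRow> next = query.getRowQueue().take();
    System.out.println(next.key + " -> " + next.value.getColumns());
  }
}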
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/SchemaUtil.java b/ksql-core/src/main/java/io/confluent/ksql/util/SchemaUtil.java
new file mode 100644
index 000000000000..f8761ee75630
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/SchemaUtil.java
@@ -0,0 +1,216 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.Set;
+
+public class SchemaUtil {
+
+  public static final String ROWKEY_NAME = "ROWKEY";
+  public static final String ROWTIME_NAME = "ROWTIME";
+  public static final int ROWKEY_NAME_INDEX = 1;
+  public static final int ROWTIME_NAME_INDEX = 0;
+
+  public static Class getJavaType(final Schema schema) {
+    switch (schema.type()) {
+      case STRING:
+        return String.class;
+      case BOOLEAN:
+        return Boolean.class;
+      case INT32:
+        return Integer.class;
+      case INT64:
+        return Long.class;
+      case FLOAT64:
+        return Double.class;
+      case ARRAY:
+        Class elementClass = getJavaType(schema.valueSchema());
+        return java.lang.reflect.Array.newInstance(elementClass, 0).getClass();
+      case MAP:
+        return (new HashMap<>()).getClass();
+      default:
+        throw new KsqlException("Type is not supported: " + schema.type());
+    }
+  }
+
+  public static Schema getTypeSchema(final String ksqlType) {
+    switch (ksqlType) {
+      case "VARCHAR":
+      case "STRING":
+        return Schema.STRING_SCHEMA;
+      case "BOOLEAN":
+        return Schema.BOOLEAN_SCHEMA;
+      case "INTEGER":
+        return Schema.INT32_SCHEMA;
+      case "BIGINT":
+        return Schema.INT64_SCHEMA;
+      case "DOUBLE":
+        return Schema.FLOAT64_SCHEMA;
+      default:
+        // Complex type names arrive as e.g. "ARRAY<DOUBLE>" or "MAP<VARCHAR,DOUBLE>",
+        // so they can never match a bare "ARRAY"/"MAP" case label.
+        if (ksqlType.startsWith("ARRAY")) {
+          return SchemaBuilder.array(getTypeSchema(
+              ksqlType.substring("ARRAY".length() + 1, ksqlType.length() - 1).trim()));
+        } else if (ksqlType.startsWith("MAP")) {
+          return SchemaBuilder.map(
+              Schema.STRING_SCHEMA,
+              getTypeSchema(ksqlType.substring(ksqlType.indexOf(",") + 1,
+                                               ksqlType.length() - 1).trim()));
+        }
+        throw new KsqlException("Type is not supported: " + ksqlType);
+    }
+  }
+
+  public static Optional<Field> getFieldByName(final Schema schema, final String fieldName) {
+    if (schema.fields() != null) {
+      for (Field field : schema.fields()) {
+        if (field.name().equals(fieldName)) {
+          return Optional.of(field);
+        } else if (field.name().equals(fieldName.substring(fieldName.indexOf(".") + 1))) {
+          return Optional.of(field);
+        }
+      }
+    }
+    return Optional.empty();
+  }
+
+  public static int getFieldIndexByName(final Schema schema, final String fieldName) {
+    if (schema.fields() == null) {
+      return -1;
+    }
+    for (int i = 0; i < schema.fields().size(); i++) {
+      Field field = schema.fields().get(i);
+      int dotIndex = field.name().indexOf(".");
+      if (dotIndex == -1) {
+        if (field.name().equals(fieldName)) {
+          return i;
+        }
+      } else {
+        if (dotIndex < fieldName.length()) {
+          String fieldNameWithDot =
+              fieldName.substring(0, dotIndex) + "." + fieldName.substring(dotIndex + 1);
+          if (field.name().equals(fieldNameWithDot)) {
+            return i;
+          }
+        }
+      }
+    }
+    return -1;
+  }
+
+  public static Schema buildSchemaWithAlias(final Schema schema, final String alias) {
+    SchemaBuilder newSchema = SchemaBuilder.struct().name(schema.name());
+    for (Field field : schema.fields()) {
+      newSchema.field((alias + "." + field.name()), field.schema());
+    }
+    return newSchema;
+  }
+
+  public static final ImmutableMap<String, String> TYPE_MAP =
+      new ImmutableMap.Builder<String, String>()
+          .put("STRING", "VARCHAR(STRING)")
+          .put("INT64", "BIGINT")
+          .put("INT32", "INTEGER")
+          .put("FLOAT64", "DOUBLE")
+          .put("BOOLEAN", "BOOLEAN")
+          .put("ARRAY", "ARRAY")
+          .put("MAP", "MAP")
+          .build();
+
+  public static String getSchemaFieldName(Field field) {
+    if (field.schema().type() == Schema.Type.ARRAY) {
+      return "ARRAY[" + TYPE_MAP.get(field.schema().valueSchema().type().name()) + "]";
+    } else if (field.schema().type() == Schema.Type.MAP) {
+      return "MAP[" + TYPE_MAP.get(field.schema().keySchema().type().name()) + ","
+             + TYPE_MAP.get(field.schema().valueSchema().type().name()) + "]";
+    } else {
+      return TYPE_MAP.get(field.schema().type().name());
+    }
+  }
+
+  public static String getJavaCastString(Schema schema) {
+    switch (schema.type()) {
+      case INT32:
+        return "(Integer)";
+      case INT64:
+        return "(Long)";
+      case FLOAT64:
+        return "(Double)";
+      case STRING:
+        return "(String)";
+      case BOOLEAN:
+        return "(Boolean)";
+      default:
+        //TODO: Add complex types later!
+        return "";
+    }
+  }
+
+  public static synchronized Schema addImplicitRowTimeRowKeyToSchema(Schema schema) {
+    SchemaBuilder schemaBuilder = SchemaBuilder.struct();
+    schemaBuilder.field(SchemaUtil.ROWTIME_NAME, Schema.INT64_SCHEMA);
+    schemaBuilder.field(SchemaUtil.ROWKEY_NAME, Schema.STRING_SCHEMA);
+    for (Field field : schema.fields()) {
+      if (!field.name().equals(SchemaUtil.ROWKEY_NAME)
+          && !field.name().equals(SchemaUtil.ROWTIME_NAME)) {
+        schemaBuilder.field(field.name(), field.schema());
+      }
+    }
+    return schemaBuilder.build();
+  }
+
+  public static synchronized Schema removeImplicitRowTimeRowKeyFromSchema(Schema schema) {
+    SchemaBuilder schemaBuilder = SchemaBuilder.struct();
+    for (Field field : schema.fields()) {
+      String fieldName = field.name();
+      fieldName = fieldName.substring(fieldName.indexOf(".") + 1);
+      if (!fieldName.equalsIgnoreCase(SchemaUtil.ROWTIME_NAME)
+          && !fieldName.equalsIgnoreCase(SchemaUtil.ROWKEY_NAME)) {
+        schemaBuilder.field(fieldName, field.schema());
+      }
+    }
+    return schemaBuilder.build();
+  }
+
+  public static synchronized Set<Integer> getRowTimeRowKeyIndexes(Schema schema) {
+    Set<Integer> indexSet = new HashSet<>();
+    for (int i = 0; i < schema.fields().size(); i++) {
+      Field field = schema.fields().get(i);
+      if (field.name().equalsIgnoreCase(SchemaUtil.ROWTIME_NAME)
+          || field.name().equalsIgnoreCase(SchemaUtil.ROWKEY_NAME)) {
+        indexSet.add(i);
+      }
+    }
+    return indexSet;
+  }
+
+  public static synchronized String getSchemaDefinitionString(Schema schema) {
+    StringBuilder stringBuilder = new StringBuilder("[");
+    boolean addComma = false;
+    for (Field field : schema.fields()) {
+      if (addComma) {
+        stringBuilder.append(" , ");
+      } else {
+        addComma = true;
+      }
+      stringBuilder.append(field.name()).append(" : ").append(field.schema().type());
+    }
+    stringBuilder.append("]");
+    return stringBuilder.toString();
+  }
+}
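A short sketch of the type round trip these helpers provide (userSchema is an assumed Connect struct schema):

Schema arraySchema = SchemaUtil.getTypeSchema("ARRAY<DOUBLE>"); // FLOAT64 array schema
Class javaType = SchemaUtil.getJavaType(arraySchema);           // Double[].class
Schema full = SchemaUtil.addImplicitRowTimeRowKeyToSchema(userSchema);
// full now starts with ROWTIME (INT64) and ROWKEY (STRING), then the user fields.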
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/SerDeUtil.java b/ksql-core/src/main/java/io/confluent/ksql/util/SerDeUtil.java
new file mode 100644
index 000000000000..d88d545a5849
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/SerDeUtil.java
@@ -0,0 +1,86 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.ddl.DdlConfig;
+import io.confluent.ksql.metastore.MetastoreUtil;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.serde.KsqlTopicSerDe;
+import io.confluent.ksql.serde.avro.KsqlAvroTopicSerDe;
+import io.confluent.ksql.serde.avro.KsqlGenericRowAvroDeserializer;
+import io.confluent.ksql.serde.avro.KsqlGenericRowAvroSerializer;
+import io.confluent.ksql.serde.delimited.KsqlDelimitedDeserializer;
+import io.confluent.ksql.serde.delimited.KsqlDelimitedSerializer;
+import io.confluent.ksql.serde.delimited.KsqlDelimitedTopicSerDe;
+import io.confluent.ksql.serde.json.KsqlJsonDeserializer;
+import io.confluent.ksql.serde.json.KsqlJsonSerializer;
+import io.confluent.ksql.serde.json.KsqlJsonTopicSerDe;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serde;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class SerDeUtil {
+
+  public static Serde<GenericRow> getGenericRowJsonSerde(Schema schema) {
+    Map<String, Object> serdeProps = new HashMap<>();
+    serdeProps.put("JsonPOJOClass", GenericRow.class);
+
+    final Serializer<GenericRow> genericRowSerializer = new KsqlJsonSerializer(schema);
+    genericRowSerializer.configure(serdeProps, false);
+
+    final Deserializer<GenericRow> genericRowDeserializer = new KsqlJsonDeserializer(schema);
+    genericRowDeserializer.configure(serdeProps, false);
+
+    return Serdes.serdeFrom(genericRowSerializer, genericRowDeserializer);
+  }
+
+  private static Serde<GenericRow> getGenericRowDelimitedSerde(final Schema schema) {
+    Map<String, Object> serdeProps = new HashMap<>();
+
+    final Serializer<GenericRow> genericRowSerializer = new KsqlDelimitedSerializer();
+    genericRowSerializer.configure(serdeProps, false);
+
+    final Deserializer<GenericRow> genericRowDeserializer = new KsqlDelimitedDeserializer(schema);
+    genericRowDeserializer.configure(serdeProps, false);
+
+    return Serdes.serdeFrom(genericRowSerializer, genericRowDeserializer);
+  }
+
+  public static Serde<GenericRow> getGenericRowAvroSerde(final Schema schema) {
+    Map<String, Object> serdeProps = new HashMap<>();
+    String avroSchemaString = new MetastoreUtil().buildAvroSchema(schema, DdlConfig.AVRO_SCHEMA);
+    serdeProps.put(KsqlGenericRowAvroSerializer.AVRO_SERDE_SCHEMA_CONFIG, avroSchemaString);
+
+    final Serializer<GenericRow> genericRowSerializer = new KsqlGenericRowAvroSerializer(schema);
+    genericRowSerializer.configure(serdeProps, false);
+
+    final Deserializer<GenericRow> genericRowDeserializer =
+        new KsqlGenericRowAvroDeserializer(schema);
+    genericRowDeserializer.configure(serdeProps, false);
+
+    return Serdes.serdeFrom(genericRowSerializer, genericRowDeserializer);
+  }
+
+  public static Serde<GenericRow> getRowSerDe(final KsqlTopicSerDe topicSerDe, Schema schema) {
+    if (topicSerDe instanceof KsqlAvroTopicSerDe) {
+      return SerDeUtil.getGenericRowAvroSerde(schema);
+    } else if (topicSerDe instanceof KsqlJsonTopicSerDe) {
+      return SerDeUtil.getGenericRowJsonSerde(schema);
+    } else if (topicSerDe instanceof KsqlDelimitedTopicSerDe) {
+      return SerDeUtil.getGenericRowDelimitedSerde(schema);
+    } else {
+      throw new KsqlException("Unknown topic serde.");
+    }
+  }
+}
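A minimal JSON round trip through the serde built above (assumes schema is in scope, and that GenericRow exposes a list-of-columns constructor, as its usage elsewhere in this patch suggests):

Serde<GenericRow> serde = SerDeUtil.getGenericRowJsonSerde(schema);
GenericRow row = new GenericRow(Arrays.asList(1502913600000L, "key-1", 42.0));
byte[] bytes = serde.serializer().serialize("my-topic", row);
GenericRow back = serde.deserializer().deserialize("my-topic", bytes);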
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/StringUtil.java b/ksql-core/src/main/java/io/confluent/ksql/util/StringUtil.java
new file mode 100644
index 000000000000..72e939ae0eb1
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/StringUtil.java
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import java.util.List;
+
+public class StringUtil {
+
+  public static String cleanQuotes(final String stringWithQuotes) {
+    // TODO: move this check into the grammar
+    if (stringWithQuotes.startsWith("'") && stringWithQuotes.endsWith("'")) {
+      return stringWithQuotes.substring(1, stringWithQuotes.length() - 1);
+    } else {
+      throw new KsqlException(stringWithQuotes
+                              + " is a string value and should be enclosed in single quotes.");
+    }
+  }
+
+  public static String join(String delimiter, List<?> objs) {
+    StringBuilder sb = new StringBuilder();
+    int cnt = 0;
+    for (Object obj : objs) {
+      if (cnt > 0) {
+        sb.append(delimiter);
+      }
+      sb.append(obj);
+      cnt += 1;
+    }
+    return sb.toString();
+  }
+}
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/Version.java b/ksql-core/src/main/java/io/confluent/ksql/util/Version.java
new file mode 100644
index 000000000000..bbb06c98eb02
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/Version.java
@@ -0,0 +1,33 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Properties;
+
+public class Version {
+  private static final Logger log = LoggerFactory.getLogger(Version.class);
+  private static String version = "unknown";
+
+  static {
+    try {
+      Properties props = new Properties();
+      props.load(Version.class.getResourceAsStream("/ksql-version.properties"));
+      version = props.getProperty("version", version).trim();
+    } catch (Exception e) {
+      log.warn("Error while loading version:", e);
+    }
+  }
+
+  public static String getVersion() {
+    return version;
+  }
+
+  public static void main(String[] args) {
+    System.err.println(getVersion());
+  }
+}
\ No newline at end of file
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/WindowedSerde.java b/ksql-core/src/main/java/io/confluent/ksql/util/WindowedSerde.java
new file mode 100644
index 000000000000..985098412d18
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/WindowedSerde.java
@@ -0,0 +1,49 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util;
+
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serde;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.streams.kstream.Windowed;
+import org.apache.kafka.streams.kstream.internals.WindowedDeserializer;
+import org.apache.kafka.streams.kstream.internals.WindowedSerializer;
+
+import java.util.Map;
+
+public class WindowedSerde implements Serde<Windowed<String>> {
+
+  private final Serializer<Windowed<String>> serializer;
+  private final Deserializer<Windowed<String>> deserializer;
+
+  public WindowedSerde() {
+    serializer = new WindowedSerializer<>(new StringSerializer());
+    deserializer = new WindowedDeserializer<>(new StringDeserializer());
+  }
+
+  @Override
+  public void configure(Map<String, ?> configs, boolean isKey) {
+    serializer.configure(configs, isKey);
+    deserializer.configure(configs, isKey);
+  }
+
+  @Override
+  public void close() {
+    serializer.close();
+    deserializer.close();
+  }
+
+  @Override
+  public Serializer<Windowed<String>> serializer() {
+    return serializer;
+  }
+
+  @Override
+  public Deserializer<Windowed<String>> deserializer() {
+    return deserializer;
+  }
+}
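A round-trip sketch for windowed keys (the TimeWindow bounds are illustrative; TimeWindow, WindowedSerializer and WindowedDeserializer all come from the kstream internals package imported above):

WindowedSerde serde = new WindowedSerde();
Windowed<String> key = new Windowed<>("user-1", new TimeWindow(0, 1000));
byte[] bytes = serde.serializer().serialize("output-topic", key);
Windowed<String> roundTripped = serde.deserializer().deserialize("output-topic", bytes);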
diff --git a/ksql-core/src/main/java/io/confluent/ksql/util/json/JsonPathTokenizer.java b/ksql-core/src/main/java/io/confluent/ksql/util/json/JsonPathTokenizer.java
new file mode 100644
index 000000000000..143bcd860bfd
--- /dev/null
+++ b/ksql-core/src/main/java/io/confluent/ksql/util/json/JsonPathTokenizer.java
@@ -0,0 +1,179 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.util.json;
+
+import com.google.common.base.VerifyException;
+import com.google.common.collect.AbstractIterator;
+import io.confluent.ksql.util.KsqlException;
+
+import static com.google.common.base.Verify.verify;
+import static java.lang.Character.isLetterOrDigit;
+import static java.lang.String.format;
+import static java.util.Objects.requireNonNull;
+
+public class JsonPathTokenizer
+    extends AbstractIterator<String> {
+
+  private static final char QUOTE = '\"';
+  private static final char BACKSLASH = '\\';
+  private static final char DOT = '.';
+  private static final char OPEN_BRACKET = '[';
+  private static final char CLOSE_BRACKET = ']';
+  private static final char UNICODE_CARET = '\u2038';
+
+  private final String path;
+  private int index;
+
+  public JsonPathTokenizer(String path) {
+    this.path = requireNonNull(path, "path is null");
+
+    if (path.isEmpty()) {
+      throw invalidJsonPath();
+    }
+
+    // skip the start token
+    match('$');
+  }
+
+  @Override
+  protected String computeNext() {
+    if (!hasNextCharacter()) {
+      return endOfData();
+    }
+
+    if (tryMatch(DOT)) {
+      return matchPathSegment();
+    }
+
+    if (tryMatch(OPEN_BRACKET)) {
+      String token = tryMatch(QUOTE) ? matchQuotedSubscript() : matchUnquotedSubscript();
+      match(CLOSE_BRACKET);
+      return token;
+    }
+
+    throw invalidJsonPath();
+  }
+
+  private String matchPathSegment() {
+    // seek until we see a special character or whitespace
+    int start = index;
+    while (hasNextCharacter() && isUnquotedPathCharacter(peekCharacter())) {
+      nextCharacter();
+    }
+    int end = index;
+
+    String token = path.substring(start, end);
+
+    // an empty unquoted token is not allowed
+    if (token.isEmpty()) {
+      throw invalidJsonPath();
+    }
+
+    return token;
+  }
+
+  private static boolean isUnquotedPathCharacter(char c) {
+    return c == ':' || isUnquotedSubscriptCharacter(c);
+  }
+
+  private String matchUnquotedSubscript() {
+    // seek until we see a special character or whitespace
+    int start = index;
+    while (hasNextCharacter() && isUnquotedSubscriptCharacter(peekCharacter())) {
+      nextCharacter();
+    }
+    int end = index;
+
+    String token = path.substring(start, end);
+
+    // an empty unquoted token is not allowed
+    if (token.isEmpty()) {
+      throw invalidJsonPath();
+    }
+
+    return token;
+  }
+
+  private static boolean isUnquotedSubscriptCharacter(char c) {
+    return c == '_' || isLetterOrDigit(c);
+  }
+
+  private String matchQuotedSubscript() {
+    // quote has already been matched
+
+    // seek until we see the close quote
+    StringBuilder token = new StringBuilder();
+    boolean escaped = false;
+
+    while (hasNextCharacter() && (escaped || peekCharacter() != QUOTE)) {
+      if (escaped) {
+        switch (peekCharacter()) {
+          case QUOTE:
+          case BACKSLASH:
+            token.append(peekCharacter());
+            break;
+          default:
+            throw invalidJsonPath();
+        }
+        escaped = false;
+      } else {
+        switch (peekCharacter()) {
+          case BACKSLASH:
+            escaped = true;
+            break;
+          case QUOTE:
+            throw new VerifyException("Should be handled by loop condition");
+          default:
+            token.append(peekCharacter());
+        }
+      }
+      nextCharacter();
+    }
+    if (escaped) {
+      verify(!hasNextCharacter(), "Loop terminated after escape while there is still input");
+      throw invalidJsonPath();
+    }
+
+    match(QUOTE);
+
+    return token.toString();
+  }
+
+  private boolean hasNextCharacter() {
+    return index < path.length();
+  }
+
+  private void match(char expected) {
+    if (!tryMatch(expected)) {
+      throw invalidJsonPath();
+    }
+  }
+
+  private boolean tryMatch(char expected) {
+    if (!hasNextCharacter() || peekCharacter() != expected) {
+      return false;
+    }
+    index++;
+    return true;
+  }
+
+  private void nextCharacter() {
+    index++;
+  }
+
+  private char peekCharacter() {
+    return path.charAt(index);
+  }
+
+  private KsqlException invalidJsonPath() {
+    return new KsqlException(format("Invalid JSON path: '%s'", path));
+  }
+
+  @Override
+  public String toString() {
+    return path.substring(0, index) + UNICODE_CARET + path.substring(index);
+  }
+}
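Tokenizing a path with the class above: the constructor consumes the '$' root, then each next() yields one segment, with bracketed segments unquoted (the path is illustrative):

JsonPathTokenizer tokenizer = new JsonPathTokenizer("$.log[\"cloud\"].region");
while (tokenizer.hasNext()) {
  System.out.println(tokenizer.next()); // prints: log, cloud, region
}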
+ **/
+
+package io.confluent.ksql.util.timestamp;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.util.KsqlConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.Configurable;
+import org.apache.kafka.streams.processor.TimestampExtractor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+public class KsqlTimestampExtractor implements TimestampExtractor, Configurable {
+
+  private static final Logger log = LoggerFactory.getLogger(KsqlTimestampExtractor.class);
+
+  int timestampColumnIndex = -1;
+
+  @Override
+  public void configure(Map<String, ?> map) {
+    if (map.containsKey(KsqlConfig.KSQL_TIMESTAMP_COLUMN_INDEX)) {
+      timestampColumnIndex = (Integer) map.get(KsqlConfig.KSQL_TIMESTAMP_COLUMN_INDEX);
+    }
+  }
+
+  @Override
+  public long extract(ConsumerRecord<Object, Object> consumerRecord, long previousTimestamp) {
+    if (timestampColumnIndex < 0) {
+      return 0;
+    } else {
+      try {
+        if (consumerRecord.value() instanceof GenericRow) {
+          GenericRow genericRow = (GenericRow) consumerRecord.value();
+          if (genericRow.getColumns().get(timestampColumnIndex) instanceof Long) {
+            return (long) genericRow.getColumns().get(timestampColumnIndex);
+          }
+        }
+      } catch (Exception e) {
+        log.error("Exception in extracting timestamp for row: " + consumerRecord.value(), e);
+      }
+    }
+    return 0;
+  }
+}
diff --git a/ksql-core/src/main/resources/checkstyle.xml b/ksql-core/src/main/resources/checkstyle.xml
new file mode 100644
index 000000000000..7470ffe4b766
--- /dev/null
+++ b/ksql-core/src/main/resources/checkstyle.xml
@@ -0,0 +1,84 @@
+<!-- checkstyle configuration for ksql-core (84 lines of module definitions not recovered) -->
\ No newline at end of file
diff --git a/ksql-core/src/main/resources/ksql-version.properties b/ksql-core/src/main/resources/ksql-version.properties
new file mode 100644
index 000000000000..8796076317e5
--- /dev/null
+++ b/ksql-core/src/main/resources/ksql-version.properties
@@ -0,0 +1 @@
+version=0.0.1
diff --git a/ksql-core/src/main/resources/log4j.properties b/ksql-core/src/main/resources/log4j.properties
new file mode 100644
index 000000000000..3c3df353d538
--- /dev/null
+++ b/ksql-core/src/main/resources/log4j.properties
@@ -0,0 +1,5 @@
+log4j.rootLogger=WARN,stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n
diff --git a/ksql-core/src/test/java/io/confluent/ksql/analyzer/AggregateAnalyzerTest.java b/ksql-core/src/test/java/io/confluent/ksql/analyzer/AggregateAnalyzerTest.java
new file mode 100644
index 000000000000..0776e2cbcb10
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/analyzer/AggregateAnalyzerTest.java
@@ -0,0 +1,213 @@
+package io.confluent.ksql.analyzer;
+
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.rewrite.AggregateExpressionRewriter;
+import io.confluent.ksql.parser.tree.ComparisonExpression;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.ExpressionTreeRewriter;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.util.KsqlTestUtil;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+public class AggregateAnalyzerTest {
+
+  private static final KsqlParser KSQL_PARSER = new KsqlParser();
+  private MetaStore metaStore;
+
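+  // These tests run parsed queries through Analyzer and then AggregateAnalyzer,
+  // asserting on what the aggregate analysis collects (aggregate functions, their
+  // arguments, and the non-aggregate result columns) for queries such as:
+  //   SELECT col1, count(col1) FROM test1 WHERE col0 > 100 GROUP BY col1;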
+  @Before
+  public void init() {
+    metaStore = KsqlTestUtil.getNewMetaStore();
+  }
+
+  private Analysis analyze(final String queryStr) {
+    List<Statement> statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+//    System.out.println(SqlFormatterQueryRewrite.formatSql(statements.get(0)).replace("\n", " "));
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    return analysis;
+  }
+
+  private AggregateAnalysis analyzeAggregates(final String queryStr) {
+    System.out.println("Test query:" + queryStr);
+    Analysis analysis = analyze(queryStr);
+    AggregateAnalysis aggregateAnalysis = new AggregateAnalysis();
+    AggregateAnalyzer aggregateAnalyzer = new AggregateAnalyzer(aggregateAnalysis, metaStore, analysis);
+    AggregateExpressionRewriter aggregateExpressionRewriter = new AggregateExpressionRewriter();
+    for (Expression expression: analysis.getSelectExpressions()) {
+      aggregateAnalyzer.process(expression, new AnalysisContext(null, null));
+      if (!aggregateAnalyzer.isHasAggregateFunction()) {
+        aggregateAnalysis.getNonAggResultColumns().add(expression);
+      }
+      aggregateAnalysis.getFinalSelectExpressions().add(
+          ExpressionTreeRewriter.rewriteWith(aggregateExpressionRewriter, expression));
+      aggregateAnalyzer.setHasAggregateFunction(false);
+    }
+
+    if (analysis.getHavingExpression() != null) {
+      aggregateAnalyzer.process(analysis.getHavingExpression(), new AnalysisContext(null, null));
+      if (!aggregateAnalyzer.isHasAggregateFunction()) {
+        aggregateAnalysis.getNonAggResultColumns().add(analysis.getHavingExpression());
+      }
+      aggregateAnalysis.setHavingExpression(ExpressionTreeRewriter.rewriteWith(aggregateExpressionRewriter,
+          analysis.getHavingExpression()));
+      aggregateAnalyzer.setHasAggregateFunction(false);
+    }
+
+    return aggregateAnalysis;
+  }
+
+  @Test
+  public void testSimpleAggregateQueryAnalysis() throws Exception {
+    String queryStr = "SELECT col1, count(col1) FROM test1 WHERE col0 > 100 group by col1;";
+    AggregateAnalysis aggregateAnalysis = analyzeAggregates(queryStr);
+    Assert.assertNotNull(aggregateAnalysis);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().size() == 1);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(0).getName().getSuffix()
+        .equalsIgnoreCase("count"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(0).toString()
+        .equalsIgnoreCase("test1.col1"));
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().size() == 1);
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().get(0).toString()
+        .equalsIgnoreCase("test1.col1"));
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsMap().size() == 1);
+    Assert.assertTrue(aggregateAnalysis.getFinalSelectExpressions().size() == 2);
+  }
+
+  @Test
+  public void testMultipleAggregateQueryAnalysis() throws Exception {
+    String queryStr = "SELECT col1, sum(col3), count(col1) FROM test1 WHERE col0 > 100 group by "
+        + "col1;";
+    AggregateAnalysis aggregateAnalysis = analyzeAggregates(queryStr);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().size() == 2);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(0).getName().getSuffix()
+        .equalsIgnoreCase("sum"));
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(1).getName().getSuffix()
+        .equalsIgnoreCase("count"));
+    Assert.assertTrue(aggregateAnalysis.getNonAggResultColumns().size() == 1);
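+    // col1 is the only select item that appears outside an aggregate call, so it
+    // is the single non-aggregate result column: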
+    Assert.assertTrue(aggregateAnalysis.getNonAggResultColumns().get(0).toString()
+        .equalsIgnoreCase("test1.col1"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(0).toString()
+        .equalsIgnoreCase("test1.col3"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(1).toString()
+        .equalsIgnoreCase("test1.col1"));
+    Assert.assertTrue(aggregateAnalysis.getFinalSelectExpressions().size() == 3);
+    Assert.assertTrue(aggregateAnalysis.getFinalSelectExpressions().get(0).toString()
+        .equalsIgnoreCase("test1.col1"));
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().size() == 2);
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().get(1).toString()
+        .equalsIgnoreCase("test1.col3"));
+  }
+
+  @Test
+  public void testExpressionArgAggregateQueryAnalysis() {
+    String queryStr = "SELECT col1, sum(col3*col0), sum(floor(col3)*3.0) FROM test1 window w "
+        + "TUMBLING ( size 2 second) WHERE col0 > 100 group by col1;";
+    AggregateAnalysis aggregateAnalysis = analyzeAggregates(queryStr);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().size() == 2);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(0).getName().getSuffix()
+        .equalsIgnoreCase("sum"));
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(1).getName().getSuffix()
+        .equalsIgnoreCase("sum"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().size() == 2);
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(0).toString()
+        .equalsIgnoreCase("(TEST1.COL3 * TEST1.COL0)"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(1).toString()
+        .equalsIgnoreCase("(FLOOR(TEST1.COL3) * 3.0)"));
+    Assert.assertTrue(aggregateAnalysis.getNonAggResultColumns().get(0).toString()
+        .equalsIgnoreCase("test1.col1"));
+
+    Assert.assertTrue(aggregateAnalysis.getFinalSelectExpressions().size() == 3);
+
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().size() == 3);
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().get(1).toString()
+        .equalsIgnoreCase("test1.col3"));
+  }
+
+  @Test
+  public void testAggregateWithExpressionQueryAnalysis() {
+    String queryStr = "SELECT col1, sum(col3*col0)/count(col1), sum(floor(col3)*3.0) FROM test1 "
+        + "window w TUMBLING ( size 2 second) WHERE col0 > 100 group by col1;";
+    AggregateAnalysis aggregateAnalysis = analyzeAggregates(queryStr);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().size() == 3);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(0).getName().getSuffix()
+        .equalsIgnoreCase("sum"));
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(1).getName().getSuffix()
+        .equalsIgnoreCase("count"));
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(2).getName().getSuffix()
+        .equalsIgnoreCase("sum"));
+
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().size() == 3);
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(0).toString()
+        .equalsIgnoreCase("(TEST1.COL3 * TEST1.COL0)"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(1).toString()
+        .equalsIgnoreCase("TEST1.COL1"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(2).toString()
+        .equalsIgnoreCase("(FLOOR(TEST1.COL3) * 3.0)"));
+    Assert.assertTrue(aggregateAnalysis.getNonAggResultColumns().get(0).toString()
+        .equalsIgnoreCase("test1.col1"));
+
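+    // In the rewritten select list each aggregate call has been replaced by a
+    // KSQL_AGG_VARIABLE_* reference (see the HAVING test below), so the list
+    // still has one entry per original select item: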
+    Assert.assertTrue(aggregateAnalysis.getFinalSelectExpressions().size() == 3);
+
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().size() == 3);
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().get(1).toString()
+        .equalsIgnoreCase("test1.col3"));
+  }
+
+  @Test
+  public void testAggregateWithExpressionHavingQueryAnalysis() {
+    String queryStr = "SELECT col1, sum(col3*col0)/count(col1), sum(floor(col3)*3.0) FROM test1 "
+        + "window w TUMBLING ( size 2 second) WHERE col0 > 100 group by col1 "
+        + "having count(col1) > 10;";
+    AggregateAnalysis aggregateAnalysis = analyzeAggregates(queryStr);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().size() == 4);
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(0).getName().getSuffix()
+        .equalsIgnoreCase("sum"));
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(1).getName().getSuffix()
+        .equalsIgnoreCase("count"));
+    Assert.assertTrue(aggregateAnalysis.getFunctionList().get(2).getName().getSuffix()
+        .equalsIgnoreCase("sum"));
+
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().size() == 4);
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(0).toString()
+        .equalsIgnoreCase("(TEST1.COL3 * TEST1.COL0)"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(1).toString()
+        .equalsIgnoreCase("TEST1.COL1"));
+    Assert.assertTrue(aggregateAnalysis.getAggregateFunctionArguments().get(2).toString()
+        .equalsIgnoreCase("(FLOOR(TEST1.COL3) * 3.0)"));
+    Assert.assertTrue(aggregateAnalysis.getNonAggResultColumns().get(0).toString()
+        .equalsIgnoreCase("test1.col1"));
+
+    Assert.assertTrue(aggregateAnalysis.getFinalSelectExpressions().size() == 3);
+
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().size() == 3);
+    Assert.assertTrue(aggregateAnalysis.getRequiredColumnsList().get(1).toString()
+        .equalsIgnoreCase("test1.col3"));
+    Assert.assertTrue(aggregateAnalysis.getHavingExpression() instanceof ComparisonExpression);
+    Assert.assertTrue(aggregateAnalysis.getHavingExpression().toString()
+        .equalsIgnoreCase("(KSQL_AGG_VARIABLE_3 > 10)"));
+  }
+}
diff --git a/ksql-core/src/test/java/io/confluent/ksql/analyzer/AnalyzerTest.java b/ksql-core/src/test/java/io/confluent/ksql/analyzer/AnalyzerTest.java
new file mode 100644
index 000000000000..a59fc2f052a9
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/analyzer/AnalyzerTest.java
@@ -0,0 +1,188 @@
+package io.confluent.ksql.analyzer;
+
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.rewrite.SqlFormatterQueryRewrite;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.util.KsqlTestUtil;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+public class AnalyzerTest {
+
+  private static final KsqlParser KSQL_PARSER = new KsqlParser();
+
+  private MetaStore metaStore;
+
+  @Before
+  public void init() {
+    metaStore = KsqlTestUtil.getNewMetaStore();
+  }
+
+  private Analysis analyze(String queryStr) {
+    List<Statement> statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    System.out.println(SqlFormatterQueryRewrite.formatSql(statements.get(0)).replace("\n", " "));
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    return analysis;
+  }
+
+  @Test
+  public void testSimpleQueryAnalysis() throws Exception {
+    String simpleQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;";
+    Analysis analysis = analyze(simpleQuery);
+    Assert.assertNotNull("INTO is null", analysis.getInto());
+    Assert.assertNotNull("FROM is null", analysis.getFromDataSources());
+    Assert.assertNotNull("SELECT is null", analysis.getSelectExpressions());
+    Assert.assertNotNull("SELECT aliases is null", analysis.getSelectExpressionAlias());
+    Assert.assertTrue("FROM was not analyzed correctly.",
+        analysis.getFromDataSources().get(0).getLeft().getName()
+            .equalsIgnoreCase("test1"));
+    Assert.assertTrue(
+        analysis.getSelectExpressions().size() == analysis.getSelectExpressionAlias().size());
+    String sqlStr =
+        SqlFormatterQueryRewrite.formatSql(analysis.getWhereExpression()).replace("\n", " ");
+    Assert.assertTrue(sqlStr.equalsIgnoreCase("(TEST1.COL0 > 100)"));
+
+    String select1 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(0))
+            .replace("\n", " ");
+    Assert.assertTrue(select1.equalsIgnoreCase("TEST1.COL0"));
+    String select2 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(1))
+            .replace("\n", " ");
+    Assert.assertTrue(select2.equalsIgnoreCase("TEST1.COL2"));
+    String select3 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(2))
+            .replace("\n", " ");
+    Assert.assertTrue(select3.equalsIgnoreCase("TEST1.COL3"));
+
+    Assert.assertTrue(analysis.getSelectExpressionAlias().get(0).equalsIgnoreCase("COL0"));
+    Assert.assertTrue(analysis.getSelectExpressionAlias().get(1).equalsIgnoreCase("COL2"));
+    Assert.assertTrue(analysis.getSelectExpressionAlias().get(2).equalsIgnoreCase("COL3"));
+  }
+
+  @Test
+  public void testSimpleLeftJoinAnalysis() throws Exception {
+    String simpleQuery =
+        "SELECT t1.col1, t2.col1, t2.col4, col5, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON "
+            + "t1.col1 = t2.col1;";
+    Analysis analysis = analyze(simpleQuery);
+    Assert.assertNotNull("INTO is null", analysis.getInto());
+    Assert.assertNotNull("JOIN is null", analysis.getJoin());
+
+    Assert.assertNotNull("SELECT is null", analysis.getSelectExpressions());
+    Assert.assertNotNull("SELECT aliases is null", analysis.getSelectExpressionAlias());
+    Assert.assertTrue("JOIN left hand side was not analyzed correctly.",
+        analysis.getJoin().getLeftAlias().equalsIgnoreCase("t1"));
+    Assert.assertTrue("JOIN right hand side was not analyzed correctly.",
+        analysis.getJoin().getRightAlias().equalsIgnoreCase("t2"));
+
+    Assert.assertTrue(
+        analysis.getSelectExpressions().size() == analysis.getSelectExpressionAlias().size());
+
+    Assert.assertTrue(analysis.getJoin().getLeftKeyFieldName().equalsIgnoreCase("COL1"));
+    Assert.assertTrue(analysis.getJoin().getRightKeyFieldName().equalsIgnoreCase("COL1"));
+
+    String select1 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(0))
+            .replace("\n", " ");
+    Assert.assertTrue(select1.equalsIgnoreCase("T1.COL1"));
+    String select2 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(1))
+            .replace("\n", " ");
+    Assert.assertTrue(select2.equalsIgnoreCase("T2.COL1"));
+    String select3 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(2))
+            .replace("\n", " ");
+    String select4 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(3))
+            .replace("\n", " ");
+    Assert.assertTrue(select3.equalsIgnoreCase("T2.COL4"));
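+    // col5 is referenced without a qualifier in the query; the analyzer resolves
+    // it against the left-hand relation, hence T1.COL5: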
+    Assert.assertTrue(select4.equalsIgnoreCase("T1.COL5"));
+
+    Assert.assertTrue(analysis.getSelectExpressionAlias().get(0).equalsIgnoreCase("T1_COL1"));
+    Assert.assertTrue(analysis.getSelectExpressionAlias().get(1).equalsIgnoreCase("T2_COL1"));
+    Assert.assertTrue(analysis.getSelectExpressionAlias().get(2).equalsIgnoreCase("T2_COL4"));
+    Assert.assertTrue(analysis.getSelectExpressionAlias().get(3).equalsIgnoreCase("COL5"));
+    Assert.assertTrue(analysis.getSelectExpressionAlias().get(4).equalsIgnoreCase("T2_COL2"));
+  }
+
+  @Test
+  public void testBooleanExpressionAnalysis() throws Exception {
+    String queryStr = "SELECT col0 = 10, col2, col3 > col1 FROM test1;";
+    Analysis analysis = analyze(queryStr);
+
+    Assert.assertNotNull("INTO is null", analysis.getInto());
+    Assert.assertNotNull("FROM is null", analysis.getFromDataSources());
+    Assert.assertNotNull("SELECT is null", analysis.getSelectExpressions());
+    Assert.assertNotNull("SELECT aliases is null", analysis.getSelectExpressionAlias());
+    Assert.assertTrue("FROM was not analyzed correctly.",
+        analysis.getFromDataSources().get(0).getLeft().getName()
+            .equalsIgnoreCase("test1"));
+
+    String select1 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(0))
+            .replace("\n", " ");
+    Assert.assertTrue(select1.equalsIgnoreCase("(TEST1.COL0 = 10)"));
+    String select2 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(1))
+            .replace("\n", " ");
+    Assert.assertTrue(select2.equalsIgnoreCase("TEST1.COL2"));
+    String select3 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(2))
+            .replace("\n", " ");
+    Assert.assertTrue(select3.equalsIgnoreCase("(TEST1.COL3 > TEST1.COL1)"));
+  }
+
+  @Test
+  public void testFilterAnalysis() throws Exception {
+    String queryStr = "SELECT col0 = 10, col2, col3 > col1 FROM test1 WHERE col0 > 20;";
+    Analysis analysis = analyze(queryStr);
+
+    Assert.assertNotNull("INTO is null", analysis.getInto());
+    Assert.assertNotNull("FROM is null", analysis.getFromDataSources());
+    Assert.assertNotNull("SELECT is null", analysis.getSelectExpressions());
+    Assert.assertNotNull("SELECT aliases is null", analysis.getSelectExpressionAlias());
+    Assert.assertTrue("FROM was not analyzed correctly.",
+        analysis.getFromDataSources().get(0).getLeft().getName()
+            .equalsIgnoreCase("test1"));
+
+    String select1 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(0))
+            .replace("\n", " ");
+    Assert.assertTrue(select1.equalsIgnoreCase("(TEST1.COL0 = 10)"));
+    String select2 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(1))
+            .replace("\n", " ");
+    Assert.assertTrue(select2.equalsIgnoreCase("TEST1.COL2"));
+    String select3 =
+        SqlFormatterQueryRewrite.formatSql(analysis.getSelectExpressions().get(2))
+            .replace("\n", " ");
+    Assert.assertTrue(select3.equalsIgnoreCase("(TEST1.COL3 > TEST1.COL1)"));
+    Assert.assertTrue("testFilterAnalysis failed.",
+        analysis.getWhereExpression().toString().equalsIgnoreCase("(TEST1.COL0 > 20)"));
+  }
+}
diff --git a/ksql-core/src/test/java/io/confluent/ksql/integtests/json/JsonFormatTest.java b/ksql-core/src/test/java/io/confluent/ksql/integtests/json/JsonFormatTest.java
new file mode 100644
index 000000000000..57e8e7279760
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/integtests/json/JsonFormatTest.java
@@ -0,0 +1,606 @@
+package io.confluent.ksql.integtests.json;
+
+import io.confluent.ksql.util.OrderDataProvider;
+import io.confluent.ksql.util.TopicConsumer;
+import io.confluent.ksql.util.TopicProducer;
+import io.confluent.ksql.KsqlEngine;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.testutils.EmbeddedSingleNodeKafkaCluster;
+import io.confluent.ksql.util.KafkaTopicClient;
+import io.confluent.ksql.util.KafkaTopicClientImpl;
+import io.confluent.ksql.util.KsqlConfig;
+import io.confluent.ksql.util.PersistentQueryMetadata;
+import io.confluent.ksql.util.QueryMetadata;
+import io.confluent.ksql.util.SchemaUtil;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.apache.kafka.streams.kstream.Windowed;
+import org.apache.kafka.streams.kstream.internals.TimeWindow;
+import org.apache.kafka.streams.kstream.internals.WindowedDeserializer;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static io.confluent.ksql.util.KsqlTestUtil.assertExpectedResults;
+import static io.confluent.ksql.util.KsqlTestUtil.assertExpectedWindowedResults;
+
+public class JsonFormatTest {
+
+  private MetaStore metaStore;
+  private KsqlEngine ksqlEngine;
+  private TopicProducer topicProducer;
+  private TopicConsumer topicConsumer;
+
+  private Map<String, GenericRow> inputData;
+  private Map<String, RecordMetadata> inputRecordsMetadata;
+
+  @ClassRule
+  public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster();
+
+  private static final String inputTopic = "orders_topic";
+  private static final String inputStream = "ORDERS";
+  private static final String messageLogTopic = "log_topic";
+  private static final String messageLogStream = "message_log";
+
+  private static final Logger log = LoggerFactory.getLogger(JsonFormatTest.class);
+
+  @Before
+  public void before() throws Exception {
+
+    Map<String, Object> configMap = new HashMap<>();
+    configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
+    configMap.put("application.id", "KSQL");
+    configMap.put("commit.interval.ms", 0);
+    configMap.put("cache.max.bytes.buffering", 0);
+    configMap.put("auto.offset.reset", "earliest");
+
+    KsqlConfig ksqlConfig = new KsqlConfig(configMap);
+    ksqlEngine = new KsqlEngine(ksqlConfig, new KafkaTopicClientImpl(ksqlConfig));
+    metaStore = ksqlEngine.getMetaStore();
+    topicProducer = new TopicProducer(CLUSTER);
+    topicConsumer = new TopicConsumer(CLUSTER);
+
+    createInitTopics();
+    produceInitData();
+    execInitCreateStreamQueries();
+  }
+
+  private void createInitTopics() {
+    ksqlEngine.getKafkaTopicClient().createTopic(inputTopic, 1, (short) 1);
+    ksqlEngine.getKafkaTopicClient().createTopic(messageLogTopic, 1, (short) 1);
+  }
+
+  private void produceInitData() throws Exception {
+    OrderDataProvider orderDataProvider = new OrderDataProvider();
+    inputData = orderDataProvider.data();
+    inputRecordsMetadata = topicProducer.produceInputData(inputTopic, orderDataProvider.data(), orderDataProvider.schema());
+
+    Schema messageSchema = SchemaBuilder.struct().field("MESSAGE", SchemaBuilder.STRING_SCHEMA).build();
+
+    GenericRow messageRow = new GenericRow(Arrays.asList(
+        "{\"log\":{\"@timestamp\":\"2017-05-30T16:44:22.175Z\",\"@version\":\"1\","
+        + "\"caasVersion\":\"0.0.2\",\"cloud\":\"aws\",\"clusterId\":\"cp99\",\"clusterName\":\"kafka\",\"cpComponentId\":\"kafka\",\"host\":\"kafka-1-wwl0p\",\"k8sId\":\"k8s13\",\"k8sName\":\"perf\",\"level\":\"ERROR\",\"logger\":\"kafka.server.ReplicaFetcherThread\",\"message\":\"Found invalid messages during fetch for partition [foo512,172] offset 0 error Record is corrupt (stored crc = 1321230880, computed crc = 1139143803)\",\"networkId\":\"vpc-d8c7a9bf\",\"region\":\"us-west-2\",\"serverId\":\"1\",\"skuId\":\"sku5\",\"source\":\"kafka\",\"tenantId\":\"t47\",\"tenantName\":\"perf-test\",\"thread\":\"ReplicaFetcherThread-0-2\",\"zone\":\"us-west-2a\"},\"stream\":\"stdout\",\"time\":2017}"));
+
+    Map<String, GenericRow> records = new HashMap<>();
+    records.put("1", messageRow);
+
+    topicProducer.produceInputData(messageLogTopic, records, messageSchema);
+  }
+
+  private void execInitCreateStreamQueries() throws Exception {
+    String ordersStreamStr = String.format("CREATE STREAM %s (ORDERTIME bigint, ORDERID varchar, "
+        + "ITEMID varchar, ORDERUNITS double, PRICEARRAY array<double>, KEYVALUEMAP "
+        + "map<varchar, double>) WITH (value_format = 'json', "
+        + "kafka_topic='%s' , "
+        + "key='ordertime');", inputStream, inputTopic);
+
+    String messageStreamStr = String.format("CREATE STREAM %s (message varchar) WITH (value_format = 'json', "
+        + "kafka_topic='%s');", messageLogStream, messageLogTopic);
+
+    ksqlEngine.buildMultipleQueries(false, ordersStreamStr, Collections.emptyMap());
+    ksqlEngine.buildMultipleQueries(false, messageStreamStr, Collections.emptyMap());
+  }
+
+  @After
+  public void after() throws Exception {
+    ksqlEngine.close();
+  }
+
+  @Test
+  public void testSelectStar() throws Exception {
+    final String streamName = "SelectStarStream".toUpperCase();
+    final String queryString = String.format("CREATE STREAM %s AS SELECT * FROM %s;", streamName, inputStream);
+
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+
+    Schema resultSchema = SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema());
+    Map<String, GenericRow> results = readNormalResults(streamName, resultSchema, inputData.size());
+
+    Assert.assertEquals(inputData.size(), results.size());
+    assertExpectedResults(results, inputData);
+
+    ksqlEngine.terminateQuery(queryMetadata.getId(), true);
+  }
+
+  @Test
+  public void testSelectProject() throws Exception {
+    final String streamName = "SelectProjectStream".toUpperCase();
+    final String queryString =
+        String.format("CREATE STREAM %s AS SELECT ITEMID, ORDERUNITS, PRICEARRAY FROM %s;", streamName, inputStream);
+
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+
+    Schema resultSchema = SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema());
+
+    Map<String, GenericRow> expectedResults = new HashMap<>();
+    expectedResults.put("1", new GenericRow(Arrays.asList("ITEM_1", 10.0,
+        new Double[]{100.0, 110.99, 90.0})));
+    expectedResults.put("2", new GenericRow(Arrays.asList("ITEM_2", 20.0,
+        new Double[]{10.0, 10.99, 9.0})));
+    expectedResults.put("3", new GenericRow(Arrays.asList("ITEM_3", 30.0,
+        new Double[]{10.0, 10.99, 91.0})));
GenericRow(Arrays.asList("ITEM_4", 40.0, new + Double[]{10.0, + 140.99, + 94.0 }))); + + expectedResults.put("5", new GenericRow(Arrays.asList("ITEM_5", 50.0, new + Double[]{160.0, + 160.99, + 98.0 }))); + + expectedResults.put("6", new GenericRow(Arrays.asList("ITEM_6", 60.0, new + Double[]{1000.0, + 1100.99, + 900.0 }))); + + expectedResults.put("7", new GenericRow(Arrays.asList("ITEM_7", 70.0, new + Double[]{1100.0, + 1110.99, + 190.0 }))); + + expectedResults.put("8", new GenericRow(Arrays.asList("ITEM_8", 80.0, new + Double[]{1100.0, + 1110.99, + 970.0 }))); + + Map results = readNormalResults(streamName, resultSchema, expectedResults.size()); + + Assert.assertEquals(expectedResults.size(), results.size()); + assertExpectedResults(results, expectedResults); + + ksqlEngine.terminateQuery(queryMetadata.getId(), true); + } + + @Test + public void testSelectProjectKeyTimestamp() throws Exception { + final String streamName = "SelectProjectKeyTimestampStream".toUpperCase(); + final String queryString = + String.format("CREATE STREAM %s AS SELECT ROWKEY AS RKEY, ROWTIME AS RTIME, ITEMID " + + "FROM %s WHERE ORDERUNITS > 20 AND ITEMID = 'ITEM_8';", streamName, + inputStream); + + PersistentQueryMetadata queryMetadata = + (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0); + queryMetadata.getKafkaStreams().start(); + + Schema resultSchema = SchemaUtil + .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema()); + + Map expectedResults = new HashMap<>(); + expectedResults.put("8", new GenericRow(Arrays.asList("8", inputRecordsMetadata.get("8") + .timestamp(), "ITEM_8"))); + + Map results = readNormalResults(streamName, resultSchema, expectedResults.size()); + + Assert.assertEquals(expectedResults.size(), results.size()); + assertExpectedResults(results, expectedResults); + + ksqlEngine.terminateQuery(queryMetadata.getId(), true); + } + + @Test + public void testTimestampColumnSelection() throws Exception { + final String stream1Name = "ORIGINALSTREAM"; + final String stream2Name = "TIMESTAMPSTREAM"; + final String query1String = + String.format("CREATE STREAM %s WITH (timestamp='RTIME') AS SELECT ROWKEY AS RKEY, " + + "ROWTIME+10000 AS " + + "RTIME, ROWTIME+100 AS RT100, ORDERID, ITEMID " + + "FROM %s WHERE ORDERUNITS > 20 AND ITEMID = 'ITEM_8'; " + + "CREATE STREAM %s AS SELECT ROWKEY AS NEWRKEY, " + + "ROWTIME AS NEWRTIME, RKEY, RTIME, RT100, ORDERID, ITEMID " + + "FROM %s ;", stream1Name, + inputStream, stream2Name, stream1Name); + + List queryMetadataList = ksqlEngine.buildMultipleQueries(true, query1String, Collections.emptyMap()); + + PersistentQueryMetadata query1Metadata = (PersistentQueryMetadata) queryMetadataList.get(0); + PersistentQueryMetadata query2Metadata = (PersistentQueryMetadata) queryMetadataList.get(1); + + query1Metadata.getKafkaStreams().start(); + query2Metadata.getKafkaStreams().start(); + + Schema resultSchema = SchemaUtil + .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(stream2Name).getSchema()); + + Map expectedResults = new HashMap<>(); + expectedResults.put("8", new GenericRow(Arrays.asList("8", inputRecordsMetadata.get("8") + .timestamp() + 10000, "8", inputRecordsMetadata.get("8").timestamp() + 10000, + inputRecordsMetadata.get("8").timestamp + () + 100, "ORDER_6", "ITEM_8"))); + + Map results1 = readNormalResults(stream1Name, resultSchema, + expectedResults.size()); + + Map results = readNormalResults(stream2Name, resultSchema, + expectedResults.size()); + + 
+    Assert.assertEquals(expectedResults.size(), results.size());
+    assertExpectedResults(results, expectedResults);
+
+    ksqlEngine.terminateQuery(query1Metadata.getId(), true);
+    ksqlEngine.terminateQuery(query2Metadata.getId(), true);
+  }
+
+  @Test
+  public void testSelectFilter() throws Exception {
+    final String streamName = "SelectFilterStream".toUpperCase();
+    final String queryString = String.format(
+        "CREATE STREAM %s AS SELECT * FROM %s WHERE ORDERUNITS > 20 AND ITEMID = 'ITEM_8';",
+        streamName,
+        inputStream
+    );
+
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+
+    Schema resultSchema = SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema());
+
+    Map<String, GenericRow> expectedResults = new HashMap<>();
+    Map<String, Double> mapField = new HashMap<>();
+    mapField.put("key1", 1.0);
+    mapField.put("key2", 2.0);
+    mapField.put("key3", 3.0);
+    expectedResults.put("8", new GenericRow(Arrays.asList(8, "ORDER_6", "ITEM_8", 80.0,
+        new Double[]{1100.0, 1110.99, 970.0},
+        mapField)));
+
+    Map<String, GenericRow> results = readNormalResults(streamName, resultSchema, expectedResults.size());
+
+    Assert.assertEquals(expectedResults.size(), results.size());
+    assertExpectedResults(results, expectedResults);
+
+    ksqlEngine.terminateQuery(queryMetadata.getId(), true);
+  }
+
+  @Test
+  public void testSelectExpression() throws Exception {
+    final String streamName = "SelectExpressionStream".toUpperCase();
+
+    final String selectColumns =
+        "ITEMID, ORDERUNITS*10, PRICEARRAY[0]+10, KEYVALUEMAP['key1']*KEYVALUEMAP['key2']+10, PRICEARRAY[1]>1000";
+    final String whereClause = "ORDERUNITS > 20 AND ITEMID LIKE '%_8'";
+
+    final String queryString = String.format(
+        "CREATE STREAM %s AS SELECT %s FROM %s WHERE %s;",
+        streamName,
+        selectColumns,
+        inputStream,
+        whereClause
+    );
+
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+
+    Schema resultSchema = SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema());
+
+    Map<String, GenericRow> expectedResults = new HashMap<>();
+    expectedResults.put("8", new GenericRow(Arrays.asList("ITEM_8", 800.0, 1110.0, 12.0, true)));
+
+    Map<String, GenericRow> results = readNormalResults(streamName, resultSchema, expectedResults.size());
+
+    Assert.assertEquals(expectedResults.size(), results.size());
+    assertExpectedResults(results, expectedResults);
+
+    ksqlEngine.terminateQuery(queryMetadata.getId(), true);
+  }
+
+  @Test
+  public void testCastExpression() throws Exception {
+    final String streamName = "CastExpressionStream".toUpperCase();
+
+    final String selectColumns =
+        " CAST (ORDERUNITS AS INTEGER), CAST( PRICEARRAY[1]>1000 AS STRING), CAST (SUBSTRING"
+        + "(ITEMID, 5) AS DOUBLE), CAST(ORDERUNITS AS VARCHAR) ";
+    final String whereClause = "ORDERUNITS > 20 AND ITEMID LIKE '%_8'";
+
+    final String queryString = String.format(
+        "CREATE STREAM %s AS SELECT %s FROM %s WHERE %s;",
+        streamName,
+        selectColumns,
+        inputStream,
+        whereClause
+    );
+
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+
+    Schema resultSchema = SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema());
+
+    Map<String, GenericRow> expectedResults = new HashMap<>();
expectedResults = new HashMap<>(); + expectedResults.put("8", new GenericRow(Arrays.asList(80, "true", 8.0, "80.0"))); + + + Map results = readNormalResults(streamName, resultSchema, expectedResults.size()); + + Assert.assertEquals(expectedResults.size(), results.size()); + assertExpectedResults(results, expectedResults); + + ksqlEngine.terminateQuery(queryMetadata.getId(), true); + } + + @Test + public void testSelectUDFs() throws Exception { + final String streamName = "SelectUDFsStream".toUpperCase(); + + final String selectColumns = + "ITEMID, ORDERUNITS*10, PRICEARRAY[0]+10, KEYVALUEMAP['key1']*KEYVALUEMAP['key2']+10, PRICEARRAY[1]>1000"; + final String whereClause = "ORDERUNITS > 20 AND ITEMID LIKE '%_8'"; + + final String queryString = String.format( + "CREATE STREAM %s AS SELECT %s FROM %s WHERE %s;", + streamName, + selectColumns, + inputStream, + whereClause + ); + + PersistentQueryMetadata queryMetadata = + (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0); + queryMetadata.getKafkaStreams().start(); + + Schema resultSchema = SchemaUtil + .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema()); + + Map expectedResults = new HashMap<>(); + expectedResults.put("8", new GenericRow(Arrays.asList("ITEM_8", 800.0, 1110.0, 12.0, true))); + + Map results = readNormalResults(streamName, resultSchema, expectedResults.size()); + + Assert.assertEquals(expectedResults.size(), results.size()); + assertExpectedResults(results, expectedResults); + + ksqlEngine.terminateQuery(queryMetadata.getId(), true); + } + + @Test + public void testSelectUDFLogicalExpression() throws Exception { + final String streamName = "SelectUDFLogicalExpressionStream".toUpperCase(); + + final String selectColumns = + "ITEMID, ORDERUNITS*10, PRICEARRAY[0]+10, KEYVALUEMAP['key1']*KEYVALUEMAP['key2']+10, PRICEARRAY[1]>1000"; + final String whereClause = "UCASE(ITEMID) = 'ITEM_8' AND ORDERUNITS > 20"; + + final String queryString = String.format( + "CREATE STREAM %s AS SELECT %s FROM %s WHERE %s;", + streamName, + selectColumns, + inputStream, + whereClause + ); + + PersistentQueryMetadata queryMetadata = + (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0); + queryMetadata.getKafkaStreams().start(); + + Schema resultSchema = SchemaUtil + .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema()); + + Map expectedResults = new HashMap<>(); + expectedResults.put("8", new GenericRow(Arrays.asList("ITEM_8", 800.0, 1110.0, 12.0, true))); + + Map results = readNormalResults(streamName, resultSchema, expectedResults.size()); + + Assert.assertEquals(expectedResults.size(), results.size()); + assertExpectedResults(results, expectedResults); + + ksqlEngine.terminateQuery(queryMetadata.getId(), true); + } + + //@Test + public void testSelectDateTimeUDFs() throws Exception { + final String streamName = "SelectDateTimeUDFsStream".toUpperCase(); + + final String selectColumns = + "(ORDERTIME+1500962514806) , TIMESTAMPTOSTRING(ORDERTIME+1500962514806, " + + "'yyyy-MM-dd HH:mm:ss.SSS'), " + + "STRINGTOTIMESTAMP" + + "(TIMESTAMPTOSTRING" + + "(ORDERTIME+1500962514806, 'yyyy-MM-dd HH:mm:ss.SSS'), 'yyyy-MM-dd HH:mm:ss.SSS')"; + final String whereClause = "ORDERUNITS > 20 AND ITEMID LIKE '%_8'"; + + final String queryString = String.format( + "CREATE STREAM %s AS SELECT %s FROM %s WHERE %s;", + streamName, + selectColumns, + inputStream, + whereClause + ); + + 
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+
+    Schema resultSchema = SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema());
+
+    Map<String, GenericRow> expectedResults = new HashMap<>();
+    expectedResults.put("8", new GenericRow(Arrays.asList(1500962514814L,
+        "2017-07-24 23:01:54.814",
+        1500962514814L)));
+
+    Map<String, GenericRow> results = readNormalResults(streamName, resultSchema, expectedResults.size());
+
+    Assert.assertEquals(expectedResults.size(), results.size());
+    assertExpectedResults(results, expectedResults);
+
+    ksqlEngine.terminateQuery(queryMetadata.getId(), true);
+  }
+
+  @Test
+  public void testAggSelectStar() throws Exception {
+
+    Map<String, RecordMetadata> newRecordsMetadata = topicProducer.produceInputData(inputTopic, inputData, SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(ksqlEngine.getMetaStore().getSource(inputStream).getSchema()));
+    final String streamName = "AGGTEST";
+    final long windowSizeMilliseconds = 2000;
+    final String selectColumns =
+        "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS), SUM(ORDERUNITS)/COUNT(ORDERUNITS), SUM(PRICEARRAY[0]+10)";
+    final String window = String.format("TUMBLING ( SIZE %d MILLISECOND)", windowSizeMilliseconds);
+    final String havingClause = "SUM(ORDERUNITS) > 150";
+
+    final String queryString = String.format(
+        "CREATE TABLE %s AS SELECT %s FROM %s WINDOW %s WHERE ORDERUNITS > 60 GROUP BY ITEMID "
+        + "HAVING %s;",
+        streamName,
+        selectColumns,
+        inputStream,
+        window,
+        havingClause
+    );
+
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+    Schema resultSchema = SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(ksqlEngine.getMetaStore().getSource(streamName).getSchema());
+
+    long firstItem8Window = inputRecordsMetadata.get("8").timestamp() / windowSizeMilliseconds;
+    long secondItem8Window = newRecordsMetadata.get("8").timestamp() / windowSizeMilliseconds;
+
+    Map<Windowed<String>, GenericRow> expectedResults = new HashMap<>();
+    if (firstItem8Window == secondItem8Window) {
+      expectedResults.put(
+          new Windowed<>("ITEM_8", new TimeWindow(0, 1)),
+          new GenericRow(Arrays.asList("ITEM_8", 2, 160.0, 80.0, 2220.0))
+      );
+    }
+
+    Map<Windowed<String>, GenericRow> results = readWindowedResults(streamName, resultSchema, expectedResults.size());
+
+    Assert.assertEquals(expectedResults.size(), results.size());
+    assertExpectedWindowedResults(results, expectedResults);
+
+    ksqlEngine.terminateQuery(queryMetadata.getId(), true);
+  }
+
+  @Test
+  public void testSinkProperties() throws Exception {
+    final String streamName = "SinkPropertiesStream".toUpperCase();
+    final int resultPartitionCount = 3;
+    final String queryString = String.format("CREATE STREAM %s WITH (PARTITIONS = %d) AS SELECT * "
+        + "FROM %s;",
+        streamName, resultPartitionCount, inputStream);
+
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+
+    KafkaTopicClient kafkaTopicClient = ksqlEngine.getKafkaTopicClient();
+
+    /*
+     * It may take several seconds after AdminClient#createTopics returns
+     * success for all the brokers to become aware that the topics have been created.
+     * During this time, AdminClient#listTopics may not return information about the new topics.
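+     * A bounded retry loop polling KafkaTopicClient#isTopicExists would be more
+     * robust here than the fixed sleep that follows.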
+     */
+    log.info("Wait for the created topic to appear in the topic list...");
+    Thread.sleep(2000);
+
+    Assert.assertTrue(kafkaTopicClient.isTopicExists(streamName));
+    ksqlEngine.terminateQuery(queryMetadata.getId(), true);
+  }
+
+  @Test
+  public void testJsonStreamExtractor() throws Exception {
+
+    final String streamName = "JSONSTREAM";
+    final String queryString = String.format("CREATE STREAM %s AS SELECT EXTRACTJSONFIELD"
+        + "(message, '$.log.cloud') "
+        + "FROM %s;",
+        streamName, messageLogStream);
+
+    PersistentQueryMetadata queryMetadata =
+        (PersistentQueryMetadata) ksqlEngine.buildMultipleQueries(true, queryString, Collections.emptyMap()).get(0);
+    queryMetadata.getKafkaStreams().start();
+
+    Schema resultSchema = SchemaUtil
+        .removeImplicitRowTimeRowKeyFromSchema(metaStore.getSource(streamName).getSchema());
+
+    Map<String, GenericRow> expectedResults = new HashMap<>();
+    expectedResults.put("1", new GenericRow(Arrays.asList("aws")));
+
+    Map<String, GenericRow> results = readNormalResults(streamName, resultSchema, expectedResults.size());
+
+    Assert.assertEquals(expectedResults.size(), results.size());
+    assertExpectedResults(results, expectedResults);
+
+    ksqlEngine.terminateQuery(queryMetadata.getId(), true);
+  }
+
+  //*********************************************************//
+
+  private Map<String, GenericRow> readNormalResults(String resultTopic, Schema resultSchema, int expectedNumMessages) {
+    return topicConsumer.readResults(resultTopic, resultSchema, expectedNumMessages, new StringDeserializer());
+  }
+
+  private Map<Windowed<String>, GenericRow> readWindowedResults(
+      String resultTopic,
+      Schema resultSchema,
+      int expectedNumMessages
+  ) {
+    Deserializer<Windowed<String>> keyDeserializer = new WindowedDeserializer<>(new StringDeserializer());
+    return topicConsumer.readResults(resultTopic, resultSchema, expectedNumMessages, keyDeserializer);
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-core/src/test/java/io/confluent/ksql/metastore/MetastoreTest.java b/ksql-core/src/test/java/io/confluent/ksql/metastore/MetastoreTest.java
new file mode 100644
index 000000000000..70fd8ac66eb6
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/metastore/MetastoreTest.java
@@ -0,0 +1,62 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+package io.confluent.ksql.metastore;
+
+
+import io.confluent.ksql.serde.json.KsqlJsonTopicSerDe;
+import io.confluent.ksql.util.KsqlTestUtil;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class MetastoreTest {
+
+  private MetaStore metaStore;
+
+  @Before
+  public void init() {
+    metaStore = KsqlTestUtil.getNewMetaStore();
+  }
+
+  @Test
+  public void testTopicMap() {
+    KsqlTopic ksqlTopic1 = new KsqlTopic("testTopic", "testTopicKafka", new KsqlJsonTopicSerDe(null));
+    metaStore.putTopic(ksqlTopic1);
+    KsqlTopic ksqlTopic2 = metaStore.getTopic("testTopic");
+    Assert.assertNotNull(ksqlTopic2);
+
+    // Check non-existent topic
+    KsqlTopic ksqlTopic3 = metaStore.getTopic("TESTTOPIC_");
+    Assert.assertNull(ksqlTopic3);
+  }
+
+  @Test
+  public void testStreamMap() {
+    StructuredDataSource structuredDataSource1 = metaStore.getSource("ORDERS");
+    Assert.assertNotNull(structuredDataSource1);
+    Assert.assertTrue(structuredDataSource1.dataSourceType == DataSource.DataSourceType.KSTREAM);
+
+    // Check non-existent stream
+    StructuredDataSource structuredDataSource2 = metaStore.getSource("nonExistentStream");
+    Assert.assertNull(structuredDataSource2);
+  }
+
+  @Test
+  public void testDelete() {
+    StructuredDataSource structuredDataSource1 = metaStore.getSource("ORDERS");
+    StructuredDataSource structuredDataSource2 = new KsqlStream("testStream",
+        structuredDataSource1.getSchema(),
+        structuredDataSource1.getKeyField(),
+        structuredDataSource1.getTimestampField(),
+        structuredDataSource1.getKsqlTopic());
+
+    metaStore.putSource(structuredDataSource2);
+    StructuredDataSource structuredDataSource3 = metaStore.getSource("testStream");
+    Assert.assertNotNull(structuredDataSource3);
+    metaStore.deleteSource("testStream");
+    StructuredDataSource structuredDataSource4 = metaStore.getSource("testStream");
+    Assert.assertNull(structuredDataSource4);
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-core/src/test/java/io/confluent/ksql/metastore/MetastoreUtilTest.java b/ksql-core/src/test/java/io/confluent/ksql/metastore/MetastoreUtilTest.java
new file mode 100644
index 000000000000..be0d93ea5120
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/metastore/MetastoreUtilTest.java
@@ -0,0 +1,131 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+package io.confluent.ksql.metastore;
+
+import io.confluent.ksql.serde.avro.KsqlAvroTopicSerDe;
+import io.confluent.ksql.serde.json.KsqlJsonTopicSerDe;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+
+public class MetastoreUtilTest {
+
+  private static final String TEST_RESOURCES_DIRECTORY = "src/test/resources/";
+
+  @Test
+  public void testMetastoreLoadingFromFile() throws Exception {
+
+    MetaStore metaStore = new MetastoreUtil().loadMetaStoreFromJsonFile(
+        TEST_RESOURCES_DIRECTORY + "TestCatalog.json");
+    Assert.assertNotNull(metaStore.getTopic("ORDERS_TOPIC"));
+    Assert.assertNotNull(metaStore.getTopic("USERS_TOPIC"));
+    Assert.assertNotNull(metaStore.getTopic("ORDERS_TOPIC_AVRO"));
+    Assert.assertNotNull(metaStore.getTopic("PAGEVIEW_TOPIC"));
+
+    KsqlTopic ordersTopic = metaStore.getTopic("ORDERS_TOPIC");
+    Assert.assertTrue(ordersTopic.getKsqlTopicSerDe() instanceof KsqlJsonTopicSerDe);
+    Assert.assertTrue(ordersTopic.getTopicName().equalsIgnoreCase("ORDERS_TOPIC"));
+    Assert.assertTrue(ordersTopic.getKafkaTopicName().equals("orders_kafka_topic"));
+
+    KsqlTopic ordersAvroTopic = metaStore.getTopic("ORDERS_TOPIC_AVRO");
+    Assert.assertTrue(ordersAvroTopic.getKsqlTopicSerDe() instanceof KsqlAvroTopicSerDe);
+    Assert.assertTrue(ordersAvroTopic.getTopicName().equalsIgnoreCase("ORDERS_TOPIC_AVRO"));
+    Assert.assertTrue(ordersAvroTopic.getKafkaTopicName().equals("orders_kafka_topic_avro"));
+
+    KsqlTopic usersTopic = metaStore.getTopic("USERS_TOPIC");
+    Assert.assertTrue(usersTopic.getKsqlTopicSerDe() instanceof KsqlJsonTopicSerDe);
+    Assert.assertTrue(usersTopic.getTopicName().equalsIgnoreCase("USERS_TOPIC"));
+    Assert.assertTrue(usersTopic.getKafkaTopicName().equals("users_kafka_topic_json"));
+
+    StructuredDataSource orders = metaStore.getSource("ORDERS");
+    Assert.assertTrue(orders instanceof KsqlStream);
+    Assert.assertTrue(orders.dataSourceType == DataSource.DataSourceType.KSTREAM);
+    Assert.assertTrue(orders.getSchema().fields().size() == 4);
+    Assert.assertTrue(orders.getKeyField().name().equalsIgnoreCase("ordertime"));
+
+    StructuredDataSource orders_avro = metaStore.getSource("ORDERS_AVRO");
+    Assert.assertTrue(orders_avro instanceof KsqlStream);
+    Assert.assertTrue(orders_avro.dataSourceType == DataSource.DataSourceType.KSTREAM);
+    Assert.assertTrue(orders_avro.getSchema().fields().size() == 4);
+    Assert.assertTrue(orders_avro.getKeyField().name().equalsIgnoreCase("ordertime"));
+
+    StructuredDataSource users = metaStore.getSource("USERS");
+    Assert.assertTrue(users instanceof KsqlTable);
+    Assert.assertTrue(users.dataSourceType == DataSource.DataSourceType.KTABLE);
+    Assert.assertTrue(users.getSchema().fields().size() == 4);
+    Assert.assertTrue(users.getKeyField().name().equalsIgnoreCase("userid"));
+  }
+
+  @Test
+  public void testReadWriteAvroSchema() {
+
+  }
+
+  // Without placing constraints on the output format of the exported catalog (which may change),
+  // it seems like the best way to test the writing capabilities of the MetastoreUtil is to just
+  // create a metastore, export it, and then re-load it, verifying that nothing has changed
+  // between the original and the re-loaded metastore.
+  // Although this effectively tests both exporting and importing a catalog, we can assume that
+  // as long as testMetastoreLoadingFromFile() succeeds, the only two possible causes of failure
+  // for this test are either the test itself being invalid, or the MetastoreUtil failing to
+  // export metastores properly.
+  @Test
+  public void testMetastoreWritingToFile() throws Exception {
+    MetastoreUtil metastoreUtil = new MetastoreUtil();
+    File testCatalogFile = File.createTempFile("ExportedCatalog", ".json", new File(TEST_RESOURCES_DIRECTORY));
+
+    MetaStore expectedMetaStore = new MetaStoreImpl();
+
+    String topicName = "TOPIC_NAME";
+    String kafkaTopicName = "KAFKA_TOPIC_NAME";
+    KsqlTopic topic = new KsqlTopic(topicName, kafkaTopicName, new KsqlJsonTopicSerDe(null));
+    expectedMetaStore.putTopic(topic);
+
+    String tableSourceName = "TABLE_SOURCE";
+    String tableKeyName = "TABLE_KEY";
+    Schema tableSchema = SchemaBuilder.struct().field(tableKeyName, Schema.BOOLEAN_SCHEMA).name(tableSourceName).build();
+    Field tableKey = tableSchema.field(tableKeyName);
+    String tableStateStore = "STATE_STORE";
+    expectedMetaStore.putSource(new KsqlTable(tableSourceName, tableSchema, tableKey, null, topic,
+        tableStateStore, false));
+
+    String streamSourceName = "STREAM_SOURCE";
+    String streamKeyName = "STREAM_KEY";
+    Schema streamSchema = SchemaBuilder.struct().field(streamKeyName, Schema.INT64_SCHEMA).name(streamSourceName).build();
+    Field streamKey = streamSchema.field(streamKeyName);
+    expectedMetaStore.putSource(new KsqlStream(streamSourceName, streamSchema, streamKey,
+        null, topic));
+
+    metastoreUtil.writeMetastoreToFile(testCatalogFile.getAbsolutePath(), expectedMetaStore);
+    MetaStore testMetaStore = metastoreUtil.loadMetaStoreFromJsonFile(testCatalogFile.getAbsolutePath());
+
+    Assert.assertNotNull(testMetaStore.getTopic(topicName));
+    Assert.assertNotNull(testMetaStore.getSource(tableSourceName));
+    Assert.assertNotNull(testMetaStore.getSource(streamSourceName));
+
+    KsqlTopic testTopic = testMetaStore.getTopic(topicName);
+    Assert.assertEquals(topicName, testTopic.getTopicName());
+    Assert.assertEquals(kafkaTopicName, testTopic.getKafkaTopicName());
+    Assert.assertTrue(testTopic.getKsqlTopicSerDe() instanceof KsqlJsonTopicSerDe);
+
+    StructuredDataSource testTableSource = testMetaStore.getSource(tableSourceName);
+    Assert.assertTrue(testTableSource instanceof KsqlTable);
+    KsqlTable testTable = (KsqlTable) testTableSource;
+    Assert.assertEquals(tableSchema, testTable.getSchema());
+    Assert.assertEquals(tableKey, testTable.getKeyField());
+    Assert.assertEquals(tableStateStore, testTable.getStateStoreName());
+
+    StructuredDataSource testStreamSource = testMetaStore.getSource(streamSourceName);
+    Assert.assertTrue(testStreamSource instanceof KsqlStream);
+    KsqlStream testStream = (KsqlStream) testStreamSource;
+    Assert.assertEquals(streamSchema, testStream.getSchema());
+    Assert.assertEquals(streamKey, testStream.getKeyField());
+
+    // Only delete the created file if the test passes
+    testCatalogFile.delete();
+  }
+}
\ No newline at end of file
diff --git a/ksql-core/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java b/ksql-core/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java
new file mode 100644
index 000000000000..533887ccc8e3
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java
@@ -0,0 +1,580 @@
+package io.confluent.ksql.parser;
+
+
+import io.confluent.ksql.ddl.DdlConfig;
+import io.confluent.ksql.exception.ParseFailedException;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.tree.AliasedRelation;
+import io.confluent.ksql.parser.tree.ComparisonExpression;
+import io.confluent.ksql.parser.tree.CreateStream;
+import io.confluent.ksql.parser.tree.CreateStreamAsSelect;
+import io.confluent.ksql.parser.tree.CreateTable;
+import io.confluent.ksql.parser.tree.DropStream;
+import io.confluent.ksql.parser.tree.DropTable;
+import io.confluent.ksql.parser.tree.ListProperties;
+import io.confluent.ksql.parser.tree.ListStreams;
+import io.confluent.ksql.parser.tree.ListTables;
+import io.confluent.ksql.parser.tree.ListTopics;
+import io.confluent.ksql.parser.tree.RegisterTopic;
+import io.confluent.ksql.parser.tree.Join;
+import io.confluent.ksql.parser.tree.Query;
+import io.confluent.ksql.parser.tree.QuerySpecification;
+import io.confluent.ksql.parser.tree.Relation;
+import io.confluent.ksql.parser.tree.SetProperty;
+import io.confluent.ksql.parser.tree.SingleColumn;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.util.KsqlTestUtil;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.List;
+
+public class KsqlParserTest {
+
+  private static final KsqlParser KSQL_PARSER = new KsqlParser();
+
+  private MetaStore metaStore;
+
+  @Before
+  public void init() {
+
+    metaStore = KsqlTestUtil.getNewMetaStore();
+  }
+
+  @Test
+  public void testSimpleQuery() throws Exception {
+    String simpleQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;";
+    Statement statement = KSQL_PARSER.buildAst(simpleQuery, metaStore).get(0);
+
+    Assert.assertTrue("testSimpleQuery fails", statement instanceof Query);
+    Query query = (Query) statement;
+    Assert.assertTrue("testSimpleQuery fails", query.getQueryBody() instanceof QuerySpecification);
+    QuerySpecification querySpecification = (QuerySpecification) query.getQueryBody();
+    Assert.assertTrue("testSimpleQuery fails", querySpecification.getSelect().getSelectItems().size() == 3);
+    Assert.assertTrue("testSimpleQuery fails", querySpecification.getFrom().isPresent());
+    Assert.assertTrue("testSimpleQuery fails", querySpecification.getWhere().isPresent());
+    Assert.assertTrue("testSimpleQuery fails", querySpecification.getFrom().get() instanceof Relation);
+    Assert.assertTrue("testSimpleQuery fails", querySpecification.getWhere().get() instanceof ComparisonExpression);
+    ComparisonExpression comparisonExpression = (ComparisonExpression) querySpecification.getWhere().get();
+    Assert.assertTrue("testSimpleQuery fails", comparisonExpression.getType().getValue().equalsIgnoreCase(">"));
+  }
+
+  @Test
+  public void testProjection() throws Exception {
+    String queryStr = "SELECT col0, col2, col3 FROM test1;";
+    Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0);
+    Assert.assertTrue("testProjection fails", statement instanceof Query);
+    Query query = (Query) statement;
+    Assert.assertTrue("testProjection fails", query.getQueryBody() instanceof QuerySpecification);
+    QuerySpecification querySpecification = (QuerySpecification) query.getQueryBody();
+    Assert.assertTrue("testProjection fails", querySpecification.getSelect().getSelectItems().size() == 3);
+    Assert.assertTrue("testProjection fails", querySpecification.getSelect().getSelectItems().get(0) instanceof SingleColumn);
+    SingleColumn column0 = (SingleColumn) querySpecification.getSelect().getSelectItems().get(0);
+    Assert.assertTrue("testProjection fails", column0.getAlias().get().equalsIgnoreCase("COL0"));
column0.getAlias().get().equalsIgnoreCase("COL0")); + Assert.assertTrue("testProjection fails", column0.getExpression().toString().equalsIgnoreCase("TEST1.COL0")); + } + + @Test + public void testProjectionWithArrayMap() throws Exception { + String queryStr = "SELECT col0, col2, col3, col4[0], col5['key1'] FROM test1;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testProjectionWithArrayMap fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testProjectionWithArrayMap fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + Assert.assertTrue("testProjectionWithArrayMap fails", querySpecification.getSelect().getSelectItems() + .size() == 5); + Assert.assertTrue("testProjectionWithArrayMap fails", querySpecification.getSelect().getSelectItems().get(0) instanceof SingleColumn); + SingleColumn column0 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(0); + Assert.assertTrue("testProjectionWithArrayMap fails", column0.getAlias().get().equalsIgnoreCase("COL0")); + Assert.assertTrue("testProjectionWithArrayMap fails", column0.getExpression().toString().equalsIgnoreCase("TEST1.COL0")); + + SingleColumn column3 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(3); + SingleColumn column4 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(4); + Assert.assertTrue("testProjectionWithArrayMap fails", column3.getExpression().toString() + .equalsIgnoreCase("TEST1.COL4[0]")); + Assert.assertTrue("testProjectionWithArrayMap fails", column4.getExpression().toString() + .equalsIgnoreCase("TEST1.COL5['key1']")); + } + + @Test + public void testProjectFilter() throws Exception { + String queryStr = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testSimpleQuery fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testProjectFilter fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + + Assert.assertTrue("testProjectFilter fails", querySpecification.getWhere().get() instanceof ComparisonExpression); + ComparisonExpression comparisonExpression = (ComparisonExpression)querySpecification.getWhere().get(); + Assert.assertTrue("testProjectFilter fails", comparisonExpression.toString().equalsIgnoreCase("(TEST1.COL0 > 100)")); + Assert.assertTrue("testProjectFilter fails", querySpecification.getSelect().getSelectItems().size() == 3); + + } + + @Test + public void testBinaryExpression() throws Exception { + String queryStr = "SELECT col0+10, col2, col3-col1 FROM test1;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testBinaryExpression fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testBinaryExpression fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + SingleColumn column0 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(0); + Assert.assertTrue("testBinaryExpression fails", column0.getAlias().get().equalsIgnoreCase("KSQL_COL_0")); + Assert.assertTrue("testBinaryExpression fails", column0.getExpression().toString().equalsIgnoreCase("(TEST1.COL0 + 
10)")); + } + + @Test + public void testBooleanExpression() throws Exception { + String queryStr = "SELECT col0 = 10, col2, col3 > col1 FROM test1;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testBooleanExpression fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testProjection fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + SingleColumn column0 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(0); + Assert.assertTrue("testBooleanExpression fails", column0.getAlias().get().equalsIgnoreCase("KSQL_COL_0")); + Assert.assertTrue("testBooleanExpression fails", column0.getExpression().toString().equalsIgnoreCase("(TEST1.COL0 = 10)")); + } + + @Test + public void testLiterals() throws Exception { + String queryStr = "SELECT 10, col2, 'test', 2.5, true, -5 FROM test1;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testLiterals fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testLiterals fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + SingleColumn column0 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(0); + Assert.assertTrue("testLiterals fails", column0.getAlias().get().equalsIgnoreCase("KSQL_COL_0")); + Assert.assertTrue("testLiterals fails", column0.getExpression().toString().equalsIgnoreCase("10")); + + SingleColumn column1 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(1); + Assert.assertTrue("testLiterals fails", column1.getAlias().get().equalsIgnoreCase("COL2")); + Assert.assertTrue("testLiterals fails", column1.getExpression().toString().equalsIgnoreCase("TEST1.COL2")); + + SingleColumn column2 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(2); + Assert.assertTrue("testLiterals fails", column2.getAlias().get().equalsIgnoreCase("KSQL_COL_2")); + Assert.assertTrue("testLiterals fails", column2.getExpression().toString().equalsIgnoreCase("'test'")); + + SingleColumn column3 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(3); + Assert.assertTrue("testLiterals fails", column3.getAlias().get().equalsIgnoreCase("KSQL_COL_3")); + Assert.assertTrue("testLiterals fails", column3.getExpression().toString().equalsIgnoreCase("2.5")); + + SingleColumn column4 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(4); + Assert.assertTrue("testLiterals fails", column4.getAlias().get().equalsIgnoreCase("KSQL_COL_4")); + Assert.assertTrue("testLiterals fails", column4.getExpression().toString().equalsIgnoreCase("true")); + + SingleColumn column5 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(5); + Assert.assertTrue("testLiterals fails", column5.getAlias().get().equalsIgnoreCase("KSQL_COL_5")); + Assert.assertTrue("testLiterals fails", column5.getExpression().toString().equalsIgnoreCase("-5")); + } + + @Test + public void testBooleanLogicalExpression() throws Exception { + String + queryStr = + "SELECT 10, col2, 'test', 2.5, true, -5 FROM test1 WHERE col1 = 10 AND col2 LIKE 'val' OR col4 > 2.6 ;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testSimpleQuery fails", statement instanceof Query); + Query query = (Query) statement; + 
Assert.assertTrue("testProjection fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + SingleColumn column0 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(0); + Assert.assertTrue("testProjection fails", column0.getAlias().get().equalsIgnoreCase("KSQL_COL_0")); + Assert.assertTrue("testProjection fails", column0.getExpression().toString().equalsIgnoreCase("10")); + + SingleColumn column1 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(1); + Assert.assertTrue("testProjection fails", column1.getAlias().get().equalsIgnoreCase("COL2")); + Assert.assertTrue("testProjection fails", column1.getExpression().toString().equalsIgnoreCase("TEST1.COL2")); + + SingleColumn column2 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(2); + Assert.assertTrue("testProjection fails", column2.getAlias().get().equalsIgnoreCase("KSQL_COL_2")); + Assert.assertTrue("testProjection fails", column2.getExpression().toString().equalsIgnoreCase("'test'")); + + } + + @Test + public void testSimpleLeftJoin() throws Exception { + String + queryStr = + "SELECT t1.col1, t2.col1, t2.col4, col5, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON " + + "t1.col1 = t2.col1;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testSimpleQuery fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testSimpleLeftJoin fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + Assert.assertTrue("testSimpleLeftJoin fails", querySpecification.getFrom().get() instanceof Join); + Join join = (Join) querySpecification.getFrom().get(); + Assert.assertTrue("testSimpleLeftJoin fails", join.getType().toString().equalsIgnoreCase("LEFT")); + + Assert.assertTrue("testSimpleLeftJoin fails", ((AliasedRelation)join.getLeft()).getAlias().equalsIgnoreCase("T1")); + Assert.assertTrue("testSimpleLeftJoin fails", ((AliasedRelation)join.getRight()).getAlias().equalsIgnoreCase("T2")); + + } + + @Test + public void testLeftJoinWithFilter() throws Exception { + String + queryStr = + "SELECT t1.col1, t2.col1, t2.col4, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON t1.col1 = " + + "t2.col1 WHERE t2.col2 = 'test';"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testSimpleQuery fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testLeftJoinWithFilter fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + Assert.assertTrue("testLeftJoinWithFilter fails", querySpecification.getFrom().get() instanceof Join); + Join join = (Join) querySpecification.getFrom().get(); + Assert.assertTrue("testLeftJoinWithFilter fails", join.getType().toString().equalsIgnoreCase("LEFT")); + + Assert.assertTrue("testLeftJoinWithFilter fails", ((AliasedRelation)join.getLeft()).getAlias().equalsIgnoreCase("T1")); + Assert.assertTrue("testLeftJoinWithFilter fails", ((AliasedRelation)join.getRight()).getAlias().equalsIgnoreCase("T2")); + + Assert.assertTrue("testLeftJoinWithFilter fails", querySpecification.getWhere().get().toString().equalsIgnoreCase("(T2.COL2 = 'test')")); + } + + @Test + public void testSelectAll() throws Exception { + String queryStr = "SELECT * FROM test1 t1;"; + Statement statement = 
KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testSelectAll fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testSelectAll fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + Assert.assertTrue("testSelectAll fails", querySpecification.getSelect().getSelectItems() + .size() == 6); + } + + @Test + public void testSelectAllJoin() throws Exception { + String + queryStr = + "SELECT * FROM test1 t1 LEFT JOIN test2 t2 ON t1.col1 = t2.col1 WHERE t2.col2 = 'test';"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testSimpleQuery fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testLeftJoinWithFilter fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + Assert.assertTrue("testSelectAllJoin fails", querySpecification.getFrom().get() instanceof Join); + Join join = (Join) querySpecification.getFrom().get(); + Assert.assertTrue("testSelectAllJoin fails", querySpecification.getSelect().getSelectItems + ().size() == 11); + Assert.assertTrue("testLeftJoinWithFilter fails", ((AliasedRelation)join.getLeft()).getAlias().equalsIgnoreCase("T1")); + Assert.assertTrue("testLeftJoinWithFilter fails", ((AliasedRelation)join.getRight()).getAlias().equalsIgnoreCase("T2")); + } + + @Test + public void testUDF() throws Exception { + String queryStr = "SELECT lcase(col1), concat(col2,'hello'), floor(abs(col3)) FROM test1 t1;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testSelectAll fails", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testSelectAll fails", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification)query.getQueryBody(); + + SingleColumn column0 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(0); + Assert.assertTrue("testProjection fails", column0.getAlias().get().equalsIgnoreCase("KSQL_COL_0")); + Assert.assertTrue("testProjection fails", column0.getExpression().toString().equalsIgnoreCase("LCASE(T1.COL1)")); + + SingleColumn column1 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(1); + Assert.assertTrue("testProjection fails", column1.getAlias().get().equalsIgnoreCase("KSQL_COL_1")); + Assert.assertTrue("testProjection fails", column1.getExpression().toString().equalsIgnoreCase("CONCAT(T1.COL2, 'hello')")); + + SingleColumn column2 = (SingleColumn)querySpecification.getSelect().getSelectItems().get(2); + Assert.assertTrue("testProjection fails", column2.getAlias().get().equalsIgnoreCase("KSQL_COL_2")); + Assert.assertTrue("testProjection fails", column2.getExpression().toString().equalsIgnoreCase("FLOOR(ABS(T1.COL3))")); + } + + @Test + public void testRegisterTopic() throws Exception { + String + queryStr = + "REGISTER TOPIC orders_topic WITH (value_format = 'avro', " + + "avroschemafile='/Users/hojjat/avro_order_schema.avro',kafka_topic='orders_topic');"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testRegisterTopic failed.", statement instanceof RegisterTopic); + RegisterTopic registerTopic = (RegisterTopic)statement; + Assert.assertTrue("testRegisterTopic failed.", registerTopic + 
.getName().toString().equalsIgnoreCase("ORDERS_TOPIC")); + Assert.assertTrue("testRegisterTopic failed.", registerTopic.getProperties().size() == 3); + Assert.assertTrue("testRegisterTopic failed.", registerTopic.getProperties().get(DdlConfig.VALUE_FORMAT_PROPERTY).toString().equalsIgnoreCase("'avro'")); + } + + @Test + public void testCreateStreamWithTopic() throws Exception { + String + queryStr = + "CREATE STREAM orders (ordertime bigint, orderid varchar, itemid varchar, orderunits " + + "double) WITH (registered_topic = 'orders_topic' , key='ordertime');"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testCreateStream failed.", statement instanceof CreateStream); + CreateStream createStream = (CreateStream)statement; + Assert.assertTrue("testCreateStream failed.", createStream.getName().toString().equalsIgnoreCase("ORDERS")); + Assert.assertTrue("testCreateStream failed.", createStream.getElements().size() == 4); + Assert.assertTrue("testCreateStream failed.", createStream.getElements().get(0).getName().toString().equalsIgnoreCase("ordertime")); + Assert.assertTrue("testCreateStream failed.", createStream.getProperties().get(DdlConfig.TOPIC_NAME_PROPERTY).toString().equalsIgnoreCase("'orders_topic'")); + } + + @Test + public void testCreateStream() throws Exception { + String + queryStr = + "CREATE STREAM orders (ordertime bigint, orderid varchar, itemid varchar, orderunits " + + "double) WITH (value_format = 'avro', " + + "avroschemafile='/Users/hojjat/avro_order_schema.avro',kafka_topic='orders_topic');"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testCreateStream failed.", statement instanceof CreateStream); + CreateStream createStream = (CreateStream)statement; + Assert.assertTrue("testCreateStream failed.", createStream.getName().toString().equalsIgnoreCase("ORDERS")); + Assert.assertTrue("testCreateStream failed.", createStream.getElements().size() == 4); + Assert.assertTrue("testCreateStream failed.", createStream.getElements().get(0).getName().toString().equalsIgnoreCase("ordertime")); + Assert.assertTrue("testCreateStream failed.", createStream.getProperties().get(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY).toString().equalsIgnoreCase("'orders_topic'")); + Assert.assertTrue("testCreateStream failed.", createStream.getProperties().get(DdlConfig + .VALUE_FORMAT_PROPERTY).toString().equalsIgnoreCase("'avro'")); + Assert.assertTrue("testCreateStream failed.", createStream.getProperties().get(DdlConfig.AVRO_SCHEMA_FILE).toString().equalsIgnoreCase("'/Users/hojjat/avro_order_schema.avro'")); + } + + @Test + public void testCreateTableWithTopic() throws Exception { + String + queryStr = + "CREATE TABLE users (usertime bigint, userid varchar, regionid varchar, gender varchar) WITH (registered_topic = 'users_topic', key='userid', statestore='user_statestore');"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testRegisterTopic failed.", statement instanceof CreateTable); + CreateTable createTable = (CreateTable)statement; + Assert.assertTrue("testCreateTable failed.", createTable.getName().toString().equalsIgnoreCase("USERS")); + Assert.assertTrue("testCreateTable failed.", createTable.getElements().size() == 4); + Assert.assertTrue("testCreateTable failed.", createTable.getElements().get(0).getName().toString().equalsIgnoreCase("usertime")); + Assert.assertTrue("testCreateTable failed.", 
createTable.getProperties().get(DdlConfig.TOPIC_NAME_PROPERTY).toString().equalsIgnoreCase("'users_topic'"));
+  }
+
+  @Test
+  public void testCreateTable() throws Exception {
+    String
+        queryStr =
+        "CREATE TABLE users (usertime bigint, userid varchar, regionid varchar, gender varchar) "
+        + "WITH (kafka_topic = 'users_topic', value_format='json');";
+    Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0);
+    Assert.assertTrue("testCreateTable failed.", statement instanceof CreateTable);
+    CreateTable createTable = (CreateTable)statement;
+    Assert.assertTrue("testCreateTable failed.", createTable.getName().toString().equalsIgnoreCase("USERS"));
+    Assert.assertTrue("testCreateTable failed.", createTable.getElements().size() == 4);
+    Assert.assertTrue("testCreateTable failed.", createTable.getElements().get(0).getName().toString().equalsIgnoreCase("usertime"));
+    Assert.assertTrue("testCreateTable failed.", createTable.getProperties().get(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY)
+        .toString().equalsIgnoreCase("'users_topic'"));
+    Assert.assertTrue("testCreateTable failed.", createTable.getProperties().get(DdlConfig.VALUE_FORMAT_PROPERTY)
+        .toString().equalsIgnoreCase("'json'"));
+  }
+
+  @Test
+  public void testCreateStreamAsSelect() throws Exception {
+
+    String
+        queryStr =
+        "CREATE STREAM bigorders_json WITH (value_format = 'json', "
+        + "kafka_topic='bigorders_topic') AS SELECT * FROM orders WHERE orderunits > 5 ;";
+    Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0);
+    Assert.assertTrue("testCreateStreamAsSelect failed.", statement instanceof CreateStreamAsSelect);
+    CreateStreamAsSelect createStreamAsSelect = (CreateStreamAsSelect)statement;
+    Assert.assertTrue("testCreateStreamAsSelect failed.", createStreamAsSelect.getName().toString().equalsIgnoreCase("bigorders_json"));
+    Assert.assertTrue("testCreateStreamAsSelect failed.", createStreamAsSelect.getQuery().getQueryBody() instanceof QuerySpecification);
+    QuerySpecification querySpecification = (QuerySpecification) createStreamAsSelect.getQuery().getQueryBody();
+    Assert.assertTrue("testCreateStreamAsSelect failed.", querySpecification.getSelect().getSelectItems().size() == 4);
+    Assert.assertTrue("testCreateStreamAsSelect failed.", querySpecification.getWhere().get().toString().equalsIgnoreCase("(ORDERS.ORDERUNITS > 5)"));
+    Assert.assertTrue("testCreateStreamAsSelect failed.", ((AliasedRelation)querySpecification.getFrom().get()).getAlias().equalsIgnoreCase("ORDERS"));
+  }
+
+  @Test
+  /*
+  TODO: Handle so-called identifier expressions as values in table properties (right now, the lack of single quotes
+  around the values causes parsing to break).
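+  For example, WITH (value_format = json) currently fails to parse, while WITH (value_format = 'json') works.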
+  */
+  @Ignore
+  public void testCreateTopicFormatWithoutQuotes() throws Exception {
+    String ksqlTopic = "unquoted_topic";
+    String format = "json";
+    String kafkaTopic = "case_insensitive_kafka_topic";
+
+    String queryStr = String.format(
+        "REGISTER TOPIC %s WITH (value_format = %s, kafka_topic = %s);",
+        ksqlTopic,
+        format,
+        kafkaTopic
+    );
+    Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0);
+    Assert.assertTrue(statement instanceof RegisterTopic);
+    RegisterTopic registerTopic = (RegisterTopic) statement;
+    Assert.assertTrue(registerTopic.getName().toString().equalsIgnoreCase(ksqlTopic));
+    Assert.assertTrue(registerTopic.getProperties().size() == 2);
+    Assert.assertTrue(registerTopic
+        .getProperties().get(DdlConfig.VALUE_FORMAT_PROPERTY).toString().equalsIgnoreCase(format));
+    Assert.assertTrue(registerTopic.getProperties().get(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY).toString().equalsIgnoreCase(kafkaTopic));
+  }
+
+  @Test
+  public void testShouldFailIfWrongKeyword() throws Exception {
+    try {
+      String simpleQuery = "SELLECT col0, col2, col3 FROM test1 WHERE col0 > 100;";
+      Statement statement = KSQL_PARSER.buildAst(simpleQuery, metaStore).get(0);
+      Assert.fail();
+    } catch (ParseFailedException e) {
+      String errorMessage = e.getMessage();
+      Assert.assertTrue(errorMessage.toLowerCase().contains(("line 1:1: mismatched input 'SELLECT'" + " expecting").toLowerCase()));
+    }
+  }
+
+  @Test
+  public void testSelectTumblingWindow() throws Exception {
+
+    String
+        queryStr =
+        "select itemid, sum(orderunits) from orders window TUMBLING ( size 30 second) where orderunits > 5 group by itemid;";
+    Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0);
+    Assert.assertTrue("testSelectTumblingWindow failed.", statement instanceof Query);
+    Query query = (Query) statement;
+    Assert.assertTrue("testSelectTumblingWindow failed.", query.getQueryBody() instanceof QuerySpecification);
+    QuerySpecification querySpecification = (QuerySpecification) query.getQueryBody();
+    Assert.assertTrue("testSelectTumblingWindow failed.", querySpecification.getSelect().getSelectItems().size() == 2);
+    Assert.assertTrue("testSelectTumblingWindow failed.", querySpecification.getWhere().get().toString().equalsIgnoreCase("(ORDERS.ORDERUNITS > 5)"));
+    Assert.assertTrue("testSelectTumblingWindow failed.", ((AliasedRelation)querySpecification.getFrom().get()).getAlias().equalsIgnoreCase("ORDERS"));
+    Assert.assertTrue("testSelectTumblingWindow failed.", querySpecification
+        .getWindowExpression().isPresent());
+    Assert.assertTrue("testSelectTumblingWindow failed.", querySpecification
+        .getWindowExpression().get().toString().equalsIgnoreCase(" WINDOW STREAMWINDOW TUMBLING ( SIZE 30 SECOND ) "));
+  }
+
+  @Test
+  public void testSelectHoppingWindow() throws Exception {
+
+    String
+        queryStr =
+        "select itemid, sum(orderunits) from orders window HOPPING ( size 30 second, advance by 5"
+        + " seconds) "
+        + "where "
+        + "orderunits"
+        + " > 5 group by itemid;";
+    Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0);
+    Assert.assertTrue("testSelectHoppingWindow failed.", statement instanceof Query);
+    Query query = (Query) statement;
+    Assert.assertTrue("testSelectHoppingWindow failed.", query.getQueryBody() instanceof QuerySpecification);
+    QuerySpecification querySpecification = (QuerySpecification) query.getQueryBody();
+    Assert.assertTrue("testSelectHoppingWindow failed.", querySpecification.getSelect().getSelectItems().size() == 2);
+    Assert.assertTrue("testSelectHoppingWindow failed.", querySpecification.getWhere().get().toString().equalsIgnoreCase("(ORDERS.ORDERUNITS > 5)"));
failed.", querySpecification.getWhere().get().toString().equalsIgnoreCase("(ORDERS.ORDERUNITS > 5)")); + Assert.assertTrue("testSelectTumblingWindow failed.", ((AliasedRelation)querySpecification.getFrom().get()).getAlias().equalsIgnoreCase("ORDERS")); + Assert.assertTrue("testSelectTumblingWindow failed.", querySpecification + .getWindowExpression().isPresent()); + Assert.assertTrue("testSelectTumblingWindow failed.", querySpecification + .getWindowExpression().get().toString().equalsIgnoreCase(" WINDOW STREAMWINDOW HOPPING ( SIZE 30 SECOND , ADVANCE BY 5 SECOND ) ")); + } + + @Test + public void testSelectSessionWindow() throws Exception { + + String + queryStr = + "select itemid, sum(orderunits) from orders window SESSION ( 30 second) where " + + "orderunits > 5 group by itemid;"; + Statement statement = KSQL_PARSER.buildAst(queryStr, metaStore).get(0); + Assert.assertTrue("testSelectSessionWindow failed.", statement instanceof Query); + Query query = (Query) statement; + Assert.assertTrue("testSelectSessionWindow failed.", query.getQueryBody() instanceof QuerySpecification); + QuerySpecification querySpecification = (QuerySpecification) query.getQueryBody(); + Assert.assertTrue("testCreateTable failed.", querySpecification.getSelect().getSelectItems + ().size() == 2); + Assert.assertTrue("testSelectSessionWindow failed.", querySpecification.getWhere().get().toString().equalsIgnoreCase("(ORDERS.ORDERUNITS > 5)")); + Assert.assertTrue("testSelectSessionWindow failed.", ((AliasedRelation)querySpecification.getFrom().get()).getAlias().equalsIgnoreCase("ORDERS")); + Assert.assertTrue("testSelectSessionWindow failed.", querySpecification + .getWindowExpression().isPresent()); + Assert.assertTrue("testSelectSessionWindow failed.", querySpecification + .getWindowExpression().get().toString().equalsIgnoreCase(" WINDOW STREAMWINDOW SESSION " + + "( 30 SECOND ) ")); + } + + @Test + public void testShowTopics() throws Exception { + String simpleQuery = "SHOW TOPICS;"; + Statement statement = KSQL_PARSER.buildAst(simpleQuery, metaStore).get(0); + Assert.assertTrue(statement instanceof ListTopics); + ListTopics listTopics = (ListTopics) statement; + Assert.assertTrue(listTopics.toString().equalsIgnoreCase("ListTopics{}")); + } + + @Test + public void testShowStreams() throws Exception { + String simpleQuery = "SHOW STREAMS;"; + Statement statement = KSQL_PARSER.buildAst(simpleQuery, metaStore).get(0); + Assert.assertTrue(statement instanceof ListStreams); + ListStreams listStreams = (ListStreams) statement; + Assert.assertTrue(listStreams.toString().equalsIgnoreCase("ListStreams{}")); + } + + @Test + public void testShowTables() throws Exception { + String simpleQuery = "SHOW TABLES;"; + Statement statement = KSQL_PARSER.buildAst(simpleQuery, metaStore).get(0); + Assert.assertTrue(statement instanceof ListTables); + ListTables listTables = (ListTables) statement; + Assert.assertTrue(listTables.toString().equalsIgnoreCase("ListTables{}")); + } + + @Test + public void testShowProperties() throws Exception { + String simpleQuery = "SHOW PROPERTIES;"; + Statement statement = KSQL_PARSER.buildAst(simpleQuery, metaStore).get(0); + Assert.assertTrue(statement instanceof ListProperties); + ListProperties listProperties = (ListProperties) statement; + Assert.assertTrue(listProperties.toString().equalsIgnoreCase("ListProperties{}")); + } + + @Test + public void testSetProperties() throws Exception { + String simpleQuery = "set 'auto.offset.reset'='earliest';"; + Statement statement = 
+    Assert.assertTrue(statement instanceof SetProperty);
+    SetProperty setProperty = (SetProperty) statement;
+    Assert.assertTrue(setProperty.toString().equalsIgnoreCase("SetProperty{}"));
+    Assert.assertTrue(setProperty.getPropertyName().equalsIgnoreCase("auto.offset.reset"));
+    Assert.assertTrue(setProperty.getPropertyValue().equalsIgnoreCase("earliest"));
+  }
+
+  @Test
+  public void testSelectSinkProperties() throws Exception {
+    String simpleQuery = "create stream s1 with (timestamp='orderid', partitions = 3) as select "
+        + "col1, col2"
+        + " from orders where col2 is null and col3 is not null or (col3*col2 = "
+        + "12);";
+    Statement statement = KSQL_PARSER.buildAst(simpleQuery, metaStore).get(0);
+    Assert.assertTrue("testSelectSinkProperties failed.", statement instanceof CreateStreamAsSelect);
+    CreateStreamAsSelect createStreamAsSelect = (CreateStreamAsSelect) statement;
+    Assert.assertTrue("testSelectSinkProperties failed.", createStreamAsSelect.getQuery().getQueryBody()
+        instanceof QuerySpecification);
+    QuerySpecification querySpecification = (QuerySpecification)
+        createStreamAsSelect.getQuery().getQueryBody();
+    Assert.assertTrue(querySpecification.getWhere().toString().equalsIgnoreCase("Optional[(((ORDERS.COL2 IS NULL) AND (ORDERS.COL3 IS NOT NULL)) OR ((ORDERS.COL3 * ORDERS.COL2) = 12))]"));
+  }
+
+  @Test
+  public void testDrop() throws Exception {
+    String simpleQuery = "DROP STREAM STREAM1; DROP TABLE TABLE1;";
+    List statements = KSQL_PARSER.buildAst(simpleQuery, metaStore);
+    Statement statement0 = statements.get(0);
+    Statement statement1 = statements.get(1);
+    Assert.assertTrue(statement0 instanceof DropStream);
+    Assert.assertTrue(statement1 instanceof DropTable);
+    DropStream dropStream = (DropStream) statement0;
+    DropTable dropTable = (DropTable) statement1;
+    Assert.assertTrue(dropStream.getName().toString().equalsIgnoreCase("STREAM1"));
+    Assert.assertTrue(dropTable.getName().toString().equalsIgnoreCase("TABLE1"));
+  }
+
+}
diff --git a/ksql-core/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java b/ksql-core/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java
new file mode 100644
index 000000000000..5650077369d9
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java
@@ -0,0 +1,177 @@
+package io.confluent.ksql.physical;
+
+
+import io.confluent.ksql.analyzer.AggregateAnalysis;
+import io.confluent.ksql.analyzer.AggregateAnalyzer;
+import io.confluent.ksql.analyzer.Analysis;
+import io.confluent.ksql.analyzer.AnalysisContext;
+import io.confluent.ksql.analyzer.Analyzer;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.rewrite.AggregateExpressionRewriter;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.ExpressionTreeRewriter;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.planner.LogicalPlanner;
+import io.confluent.ksql.planner.plan.PlanNode;
+import io.confluent.ksql.structured.SchemaKStream;
+import io.confluent.ksql.structured.SchemaKTable;
+import io.confluent.ksql.util.FakeKafkaTopicClient;
+import io.confluent.ksql.util.KsqlConfig;
+import io.confluent.ksql.util.KsqlTestUtil;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.streams.kstream.KStreamBuilder;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class PhysicalPlanBuilderTest {
+
+  KStreamBuilder kStreamBuilder;
+  KsqlParser ksqlParser;
+  PhysicalPlanBuilder physicalPlanBuilder;
+  MetaStore metaStore;
+
+  @Before
+  public void before() {
+    kStreamBuilder = new KStreamBuilder();
+    ksqlParser = new KsqlParser();
+    metaStore = KsqlTestUtil.getNewMetaStore();
+    Map configMap = new HashMap<>();
+    configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+    configMap.put("application.id", "KSQL");
+    configMap.put("commit.interval.ms", 0);
+    configMap.put("cache.max.bytes.buffering", 0);
+    configMap.put("auto.offset.reset", "earliest");
+    physicalPlanBuilder = new PhysicalPlanBuilder(kStreamBuilder, new KsqlConfig(configMap), new FakeKafkaTopicClient());
+  }
+
+  private SchemaKStream buildPhysicalPlan(String queryStr) throws Exception {
+    List statements = ksqlParser.buildAst(queryStr, metaStore);
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+
+    AggregateAnalysis aggregateAnalysis = new AggregateAnalysis();
+    AggregateAnalyzer aggregateAnalyzer = new AggregateAnalyzer(aggregateAnalysis, metaStore,
+        analysis);
+    AggregateExpressionRewriter aggregateExpressionRewriter = new AggregateExpressionRewriter();
+    for (Expression expression: analysis.getSelectExpressions()) {
+      aggregateAnalyzer.process(expression, new AnalysisContext(null, null));
+      if (!aggregateAnalyzer.isHasAggregateFunction()) {
+        aggregateAnalysis.getNonAggResultColumns().add(expression);
+      }
+      aggregateAnalysis.getFinalSelectExpressions().add(
+          ExpressionTreeRewriter.rewriteWith(aggregateExpressionRewriter, expression));
+      aggregateAnalyzer.setHasAggregateFunction(false);
+    }
+    // Build a logical plan
+    PlanNode logicalPlan = new LogicalPlanner(analysis, aggregateAnalysis).buildPlan();
+    SchemaKStream schemaKStream = physicalPlanBuilder.buildPhysicalPlan(logicalPlan);
+    return schemaKStream;
+  }
+
+  @Test
+  public void testSimpleSelect() throws Exception {
+    String simpleQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;";
+    SchemaKStream schemaKStream = buildPhysicalPlan(simpleQuery);
+    Assert.assertNotNull(schemaKStream);
+    Assert.assertTrue(schemaKStream.getSchema().fields().size() == 3);
+    Assert.assertTrue(schemaKStream.getSchema().fields().get(0).name().equalsIgnoreCase("COL0"));
+    Assert.assertTrue(schemaKStream.getSchema().fields().get(1).schema() == Schema.STRING_SCHEMA);
+    Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSchema().fields()
+        .size() == 6);
+    Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSchema().fields().get(0).name().equalsIgnoreCase("TEST1.COL0"));
+  }
+
+  @Test
+  public void testSimpleLeftJoinLogicalPlan() throws Exception {
+    String
+        simpleQuery =
+        "SELECT t1.col1, t2.col1, t2.col4, col5, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 "
+        + "ON t1.col1 = t2.col1;";
+    SchemaKStream schemaKStream = buildPhysicalPlan(simpleQuery);
+    Assert.assertNotNull(schemaKStream);
+    Assert.assertTrue(schemaKStream.getSchema().fields().size() == 5);
+    Assert.assertTrue(schemaKStream.getSchema().fields().get(0).name().equalsIgnoreCase("T1_COL1"));
+    Assert.assertTrue(schemaKStream.getSchema().fields().get(1).schema() == Schema.STRING_SCHEMA);
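+    // col5 is unqualified in the projection; it should still resolve through the join and keep the name COL5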
Assert.assertTrue(schemaKStream.getSchema().fields().get(3).name().equalsIgnoreCase + ("COL5")); + Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSourceSchemaKStreams().size() == 2); + Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSchema().fields() + .size() == 11); + } + + @Test + public void testSimpleLeftJoinFilterLogicalPlan() throws Exception { + String + simpleQuery = + "SELECT t1.col1, t2.col1, t2.col4, col5, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 " + + "ON " + + "t1.col1 = t2.col1 WHERE t1.col0 > 10 AND t2.col3 = 10.8;"; + SchemaKStream schemaKStream = buildPhysicalPlan(simpleQuery); + Assert.assertNotNull(schemaKStream); + Assert.assertTrue(schemaKStream.getSchema().fields().size() == 5); + Assert.assertTrue(schemaKStream.getSchema().fields().get(1).name().equalsIgnoreCase + ("T2_COL1")); + Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSchema().fields() + .size() == 11); + Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSourceSchemaKStreams().get(0).getSourceSchemaKStreams().size() == 2); + } + + @Test + public void testSimpleAggregate() throws Exception { + String queryString = "SELECT col0, sum(col3), count(col3) FROM test1 window TUMBLING ( " + + "size 2 " + + "second) " + + "WHERE col0 > 100 GROUP BY col0;"; + SchemaKStream schemaKStream = buildPhysicalPlan(queryString); + Assert.assertNotNull(schemaKStream); + Assert.assertTrue(schemaKStream.getSchema().fields().size() == 3); + Assert.assertTrue(schemaKStream.getSchema().fields().get(0).name().equalsIgnoreCase("COL0")); + Assert.assertTrue(schemaKStream.getSchema().fields().get(1).schema() == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSchema().fields().size() == 4); + Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSchema().fields().get(0).name().equalsIgnoreCase("TEST1.COL0")); + } + + @Test + public void testSimpleAggregateNoWindow() throws Exception { + String queryString = "SELECT col0, sum(col3), count(col3) FROM test1 " + + "WHERE col0 > 100 GROUP BY col0;"; + SchemaKStream schemaKStream = buildPhysicalPlan(queryString); + Assert.assertNotNull(schemaKStream); + Assert.assertTrue(schemaKStream.getSchema().fields().size() == 3); + Assert.assertTrue(schemaKStream.getSchema().fields().get(0).name().equalsIgnoreCase("COL0")); + Assert.assertTrue(schemaKStream.getSchema().fields().get(1).schema() == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0).getSchema().fields().size() == 4); + Assert.assertTrue(schemaKStream.getSchema().fields().get(0).name() + .equalsIgnoreCase("COL0")); + Assert.assertTrue(schemaKStream.getSourceSchemaKStreams().get(0) instanceof SchemaKTable); + Assert.assertTrue(((SchemaKTable) schemaKStream.getSourceSchemaKStreams().get(0)) + .isWindowed() == false); + } + + @Test + public void testExecutionPlan() throws Exception { + String queryString = "SELECT col0, sum(col3), count(col3) FROM test1 " + + "WHERE col0 > 100 GROUP BY col0;"; + SchemaKStream schemaKStream = buildPhysicalPlan(queryString); + String planText = schemaKStream.getExecutionPlan(""); + String[] lines = planText.split("\n"); + Assert.assertEquals(lines[0], " > [ SINK ] Schema: [COL0 : INT64 , KSQL_COL_1 : FLOAT64 " + + ", KSQL_COL_2 : INT64]."); + Assert.assertEquals(lines[1], "\t\t > [ AGGREGATE ] Schema: [TEST1.COL0 : INT64 , TEST1.COL3 : FLOAT64 , KSQL_AGG_VARIABLE_0 : FLOAT64 , KSQL_AGG_VARIABLE_1 : INT64]."); + 
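+    // Below the aggregate, the textual plan should descend PROJECT -> REKEY -> FILTER -> SOURCE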
Assert.assertEquals(lines[2], "\t\t\t\t > [ PROJECT ] Schema: [TEST1.COL0 : INT64 , TEST1.COL3 : FLOAT64].");
+    Assert.assertEquals(lines[3], "\t\t\t\t\t\t > [ REKEY ] Schema: [TEST1.COL0 : INT64 , TEST1.COL1 : STRING , TEST1.COL2 : STRING , TEST1.COL3 : FLOAT64 , TEST1.COL4 : ARRAY , TEST1.COL5 : MAP].");
+    Assert.assertEquals(lines[4], "\t\t\t\t\t\t\t\t > [ FILTER ] Schema: [TEST1.COL0 : INT64 , TEST1.COL1 : STRING , TEST1.COL2 : STRING , TEST1.COL3 : FLOAT64 , TEST1.COL4 : ARRAY , TEST1.COL5 : MAP].");
+    Assert.assertEquals(lines[5], "\t\t\t\t\t\t\t\t\t\t > [ SOURCE ] Schema: [TEST1.COL0 : INT64 , TEST1.COL1 : STRING , TEST1.COL2 : STRING , TEST1.COL3 : FLOAT64 , TEST1.COL4 : ARRAY , TEST1.COL5 : MAP].");
+  }
+
+}
diff --git a/ksql-core/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java b/ksql-core/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java
new file mode 100644
index 000000000000..a1f4c81c91cc
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java
@@ -0,0 +1,157 @@
+package io.confluent.ksql.planner;
+
+import io.confluent.ksql.analyzer.AggregateAnalysis;
+import io.confluent.ksql.analyzer.AggregateAnalyzer;
+import io.confluent.ksql.analyzer.Analysis;
+import io.confluent.ksql.analyzer.AnalysisContext;
+import io.confluent.ksql.analyzer.Analyzer;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.planner.plan.AggregateNode;
+import io.confluent.ksql.planner.plan.FilterNode;
+import io.confluent.ksql.planner.plan.JoinNode;
+import io.confluent.ksql.planner.plan.PlanNode;
+import io.confluent.ksql.planner.plan.ProjectNode;
+import io.confluent.ksql.planner.plan.StructuredDataSourceNode;
+import io.confluent.ksql.util.KsqlTestUtil;
+import org.apache.kafka.connect.data.Schema;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+public class LogicalPlannerTest {
+
+  private static final KsqlParser KSQL_PARSER = new KsqlParser();
+
+  private MetaStore metaStore;
+
+  @Before
+  public void init() {
+    metaStore = KsqlTestUtil.getNewMetaStore();
+  }
+
+  private PlanNode buildLogicalPlan(String queryStr) {
+    List statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    AggregateAnalysis aggregateAnalysis = new AggregateAnalysis();
+    AggregateAnalyzer aggregateAnalyzer = new AggregateAnalyzer(aggregateAnalysis, metaStore, analysis);
+    for (Expression expression: analysis.getSelectExpressions()) {
+      aggregateAnalyzer.process(expression, new AnalysisContext(null, null));
+    }
+    // Build a logical plan
+    PlanNode logicalPlan = new LogicalPlanner(analysis, aggregateAnalysis).buildPlan();
+    return logicalPlan;
+  }
+
+  @Test
+  public void testSimpleQueryLogicalPlan() throws Exception {
+    String simpleQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;";
+    PlanNode logicalPlan = buildLogicalPlan(simpleQuery);
+
+//    Assert.assertTrue(logicalPlan instanceof OutputKafkaTopicNode);
+    Assert.assertTrue(logicalPlan.getSources().get(0) instanceof ProjectNode);
+    Assert.assertTrue(logicalPlan.getSources().get(0).getSources().get(0) instanceof FilterNode);
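+    // The logical plan should bottom out at the structured data source that feeds the filter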
Assert.assertTrue(logicalPlan.getSources().get(0).getSources().get(0).getSources() + .get(0) instanceof StructuredDataSourceNode); + + Assert.assertTrue(logicalPlan.getSchema().fields().size() == 3); + Assert.assertNotNull( + ((FilterNode) logicalPlan.getSources().get(0).getSources().get(0)).getPredicate()); + } + + @Test + public void testSimpleLeftJoinLogicalPlan() throws Exception { + String simpleQuery = "SELECT t1.col1, t2.col1, t1.col4, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON t1.col1 = t2.col1;"; + PlanNode logicalPlan = buildLogicalPlan(simpleQuery); + +// Assert.assertTrue(logicalPlan instanceof OutputKafkaTopicNode); + Assert.assertTrue(logicalPlan.getSources().get(0) instanceof ProjectNode); + Assert.assertTrue(logicalPlan.getSources().get(0).getSources().get(0) instanceof JoinNode); + Assert.assertTrue(logicalPlan.getSources().get(0).getSources().get(0).getSources() + .get(0) instanceof StructuredDataSourceNode); + Assert.assertTrue(logicalPlan.getSources().get(0).getSources().get(0).getSources() + .get(1) instanceof StructuredDataSourceNode); + + Assert.assertTrue(logicalPlan.getSchema().fields().size() == 4); + + } + + @Test + public void testSimpleLeftJoinFilterLogicalPlan() throws Exception { + String + simpleQuery = + "SELECT t1.col1, t2.col1, col5, t2.col4, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON " + + "t1.col1 = t2.col1 WHERE t1.col1 > 10 AND t2.col4 = 10.8;"; + PlanNode logicalPlan = buildLogicalPlan(simpleQuery); + +// Assert.assertTrue(logicalPlan instanceof OutputKafkaTopicNode); + Assert.assertTrue(logicalPlan.getSources().get(0) instanceof ProjectNode); + ProjectNode projectNode = (ProjectNode) logicalPlan.getSources().get(0); + + Assert.assertTrue(projectNode.getKeyField().name().equalsIgnoreCase("t1.col1")); + Assert.assertTrue(projectNode.getSchema().fields().size() == 5); + + Assert.assertTrue(projectNode.getSources().get(0) instanceof FilterNode); + FilterNode filterNode = (FilterNode) projectNode.getSources().get(0); + Assert.assertTrue(filterNode.getPredicate().toString() + .equalsIgnoreCase("((T1.COL1 > 10) AND (T2.COL4 = 10.8))")); + + Assert.assertTrue(filterNode.getSources().get(0) instanceof JoinNode); + JoinNode joinNode = (JoinNode) filterNode.getSources().get(0); + Assert.assertTrue(joinNode.getSources().get(0) instanceof StructuredDataSourceNode); + Assert.assertTrue(joinNode.getSources().get(1) instanceof StructuredDataSourceNode); + + } + + @Test + public void testSimpleAggregateLogicalPlan() throws Exception { + String simpleQuery = "SELECT col0, sum(col3), count(col3) FROM test1 window TUMBLING ( size 2 " + + "second) " + + "WHERE col0 > 100 GROUP BY col0;"; + + PlanNode logicalPlan = buildLogicalPlan(simpleQuery); + + Assert.assertTrue(logicalPlan.getSources().get(0) instanceof AggregateNode); + AggregateNode aggregateNode = (AggregateNode) logicalPlan.getSources().get(0); + Assert.assertTrue(aggregateNode.getFunctionList().size() == 2); + Assert.assertTrue(aggregateNode.getFunctionList().get(0).getName().getSuffix() + .equalsIgnoreCase("sum")); + Assert.assertTrue(aggregateNode.getWindowExpression().getKsqlWindowExpression().toString().equalsIgnoreCase(" TUMBLING ( SIZE 2 SECOND ) ")); + Assert.assertTrue(aggregateNode.getGroupByExpressions().size() == 1); + Assert.assertTrue(aggregateNode.getGroupByExpressions().get(0).toString().equalsIgnoreCase("TEST1.COL0")); + Assert.assertTrue(aggregateNode.getRequiredColumnList().size() == 2); + Assert.assertTrue(aggregateNode.getSchema().fields().get(1).schema() == Schema.FLOAT64_SCHEMA); + 
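+    // sum(col3) over a FLOAT64 column yields FLOAT64, while count(col3) yields INT64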
Assert.assertTrue(aggregateNode.getSchema().fields().get(2).schema() == Schema.INT64_SCHEMA); + Assert.assertTrue(logicalPlan.getSources().get(0).getSchema().fields().size() == 3); + + } + + @Test + public void testComplexAggregateLogicalPlan() throws Exception { + String simpleQuery = "SELECT col0, sum(floor(col3)*100)/count(col3) FROM test1 window " + + "HOPPING ( size 2 second, advance by 1 second) " + + "WHERE col0 > 100 GROUP BY col0;"; + + PlanNode logicalPlan = buildLogicalPlan(simpleQuery); + + Assert.assertTrue(logicalPlan.getSources().get(0) instanceof AggregateNode); + AggregateNode aggregateNode = (AggregateNode) logicalPlan.getSources().get(0); + Assert.assertTrue(aggregateNode.getFunctionList().size() == 2); + Assert.assertTrue(aggregateNode.getFunctionList().get(0).getName().getSuffix() + .equalsIgnoreCase("sum")); + Assert.assertTrue(aggregateNode.getWindowExpression().getKsqlWindowExpression().toString().equalsIgnoreCase(" HOPPING ( SIZE 2 SECOND , ADVANCE BY 1 SECOND ) ")); + Assert.assertTrue(aggregateNode.getGroupByExpressions().size() == 1); + Assert.assertTrue(aggregateNode.getGroupByExpressions().get(0).toString().equalsIgnoreCase("TEST1.COL0")); + Assert.assertTrue(aggregateNode.getRequiredColumnList().size() == 2); + Assert.assertTrue(aggregateNode.getSchema().fields().get(1).schema() == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(logicalPlan.getSources().get(0).getSchema().fields().size() == 2); + + } +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java b/ksql-core/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java new file mode 100644 index 000000000000..f7bda188fc31 --- /dev/null +++ b/ksql-core/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java @@ -0,0 +1,178 @@ +package io.confluent.ksql.structured; + + +import io.confluent.ksql.analyzer.AggregateAnalysis; +import io.confluent.ksql.analyzer.AggregateAnalyzer; +import io.confluent.ksql.analyzer.Analysis; +import io.confluent.ksql.analyzer.AnalysisContext; +import io.confluent.ksql.analyzer.Analyzer; +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.KsqlParser; +import io.confluent.ksql.parser.rewrite.SqlFormatterQueryRewrite; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.planner.LogicalPlanner; +import io.confluent.ksql.planner.plan.FilterNode; +import io.confluent.ksql.planner.plan.PlanNode; +import io.confluent.ksql.planner.plan.ProjectNode; +import io.confluent.ksql.util.KsqlTestUtil; +import io.confluent.ksql.util.SerDeUtil; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KStreamBuilder; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class SchemaKStreamTest { + + private SchemaKStream initialSchemaKStream; + private static final KsqlParser KSQL_PARSER = new KsqlParser(); + + MetaStore metaStore; + KStream kStream; + KsqlStream ksqlStream; + + @Before + public void init() { + metaStore = KsqlTestUtil.getNewMetaStore(); + ksqlStream = (KsqlStream) metaStore.getSource("TEST1"); + KStreamBuilder builder = new KStreamBuilder(); + kStream = builder.stream(Serdes.String(), SerDeUtil.getRowSerDe(ksqlStream.getKsqlTopic() + .getKsqlTopicSerDe(), null), + 
ksqlStream.getKsqlTopic().getKafkaTopicName());
+//    initialSchemaKStream = new SchemaKStream(ksqlStream.getSchema(), kStream,
+//                                             ksqlStream.getKeyField(), new ArrayList<>());
+  }
+
+  private Analysis analyze(String queryStr) {
+    List statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    System.out.println(SqlFormatterQueryRewrite.formatSql(statements.get(0)).replace("\n", " "));
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    return analysis;
+  }
+
+  private PlanNode buildLogicalPlan(String queryStr) {
+    List statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    AggregateAnalysis aggregateAnalysis = new AggregateAnalysis();
+    AggregateAnalyzer aggregateAnalyzer = new AggregateAnalyzer(aggregateAnalysis, metaStore, analysis);
+    for (Expression expression: analysis.getSelectExpressions()) {
+      aggregateAnalyzer.process(expression, new AnalysisContext(null, null));
+    }
+    // Build a logical plan
+    PlanNode logicalPlan = new LogicalPlanner(analysis, aggregateAnalysis).buildPlan();
+    return logicalPlan;
+  }
+
+  @Test
+  public void testSelectSchemaKStream() throws Exception {
+    String selectQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;";
+    PlanNode logicalPlan = buildLogicalPlan(selectQuery);
+    ProjectNode projectNode = (ProjectNode) logicalPlan.getSources().get(0);
+    initialSchemaKStream = new SchemaKStream(logicalPlan.getTheSourceNode().getSchema(), kStream,
+                                             ksqlStream.getKeyField(), new ArrayList<>(),
+                                             SchemaKStream.Type.SOURCE);
+    SchemaKStream projectedSchemaKStream = initialSchemaKStream.select(projectNode.getProjectNameExpressionPairList());
+    Assert.assertTrue(projectedSchemaKStream.getSchema().fields().size() == 3);
+    Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL0") ==
+                      projectedSchemaKStream.getSchema().fields().get(0));
+    Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL2") ==
+                      projectedSchemaKStream.getSchema().fields().get(1));
+    Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL3") ==
+                      projectedSchemaKStream.getSchema().fields().get(2));
+
+    Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL0").schema() == Schema.INT64_SCHEMA);
+    Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL2").schema() == Schema.STRING_SCHEMA);
+    Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL3").schema() == Schema.FLOAT64_SCHEMA);
+
+    Assert.assertTrue(projectedSchemaKStream.getSourceSchemaKStreams().get(0) ==
+                      initialSchemaKStream);
+  }
+
+
+  @Test
+  public void testSelectWithExpression() throws Exception {
+    String selectQuery = "SELECT col0, LEN(UCASE(col2)), col3*3+5 FROM test1 WHERE col0 > 100;";
+    PlanNode logicalPlan = buildLogicalPlan(selectQuery);
+    ProjectNode projectNode = (ProjectNode) logicalPlan.getSources().get(0);
+    initialSchemaKStream = new SchemaKStream(logicalPlan.getTheSourceNode().getSchema(), kStream,
+                                             ksqlStream.getKeyField(), new ArrayList<>(),
+                                             SchemaKStream.Type.SOURCE);
+    SchemaKStream projectedSchemaKStream = initialSchemaKStream.select(projectNode.getProjectNameExpressionPairList());
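+    // Projected expressions without an explicit alias are assigned generated KSQL_COL_n names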
Assert.assertTrue(projectedSchemaKStream.getSchema().fields().size() == 3); + Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL0") == + projectedSchemaKStream.getSchema().fields().get(0)); + Assert.assertTrue(projectedSchemaKStream.getSchema().field("KSQL_COL_1") == + projectedSchemaKStream.getSchema().fields().get(1)); + Assert.assertTrue(projectedSchemaKStream.getSchema().field("KSQL_COL_2") == + projectedSchemaKStream.getSchema().fields().get(2)); + + Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL0").schema() == Schema.INT64_SCHEMA); + Assert.assertTrue(projectedSchemaKStream.getSchema().fields().get(1).schema() == Schema + .INT32_SCHEMA); + Assert.assertTrue(projectedSchemaKStream.getSchema().fields().get(2).schema() == Schema + .FLOAT64_SCHEMA); + + Assert.assertTrue(projectedSchemaKStream.getSourceSchemaKStreams().get(0) == + initialSchemaKStream); + } + + @Test + public void testFilter() throws Exception { + String selectQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;"; + PlanNode logicalPlan = buildLogicalPlan(selectQuery); + FilterNode filterNode = (FilterNode) logicalPlan.getSources().get(0).getSources().get(0); + + initialSchemaKStream = new SchemaKStream(logicalPlan.getTheSourceNode().getSchema(), kStream, + ksqlStream.getKeyField(), new ArrayList<>(), + SchemaKStream.Type.SOURCE); + SchemaKStream filteredSchemaKStream = initialSchemaKStream.filter(filterNode.getPredicate()); + + Assert.assertTrue(filteredSchemaKStream.getSchema().fields().size() == 6); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL0") == + filteredSchemaKStream.getSchema().fields().get(0)); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL1") == + filteredSchemaKStream.getSchema().fields().get(1)); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL2") == + filteredSchemaKStream.getSchema().fields().get(2)); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL3") == + filteredSchemaKStream.getSchema().fields().get(3)); + + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL0").schema() == Schema.INT64_SCHEMA); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL1").schema() == Schema.STRING_SCHEMA); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL2").schema() == Schema.STRING_SCHEMA); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL3").schema() == Schema.FLOAT64_SCHEMA); + + Assert.assertTrue(filteredSchemaKStream.getSourceSchemaKStreams().get(0) == + initialSchemaKStream); + } + + @Test + public void testSelectKey() throws Exception { + String selectQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;"; + PlanNode logicalPlan = buildLogicalPlan(selectQuery); + FilterNode filterNode = (FilterNode) logicalPlan.getSources().get(0).getSources().get(0); + + initialSchemaKStream = new SchemaKStream(logicalPlan.getTheSourceNode().getSchema(), kStream, + ksqlStream.getKeyField(), new ArrayList<>(), + SchemaKStream.Type.SOURCE); + SchemaKStream rekeyedSchemaKStream = initialSchemaKStream.selectKey(initialSchemaKStream + .getSchema().fields() + .get(1)); + Assert.assertTrue(rekeyedSchemaKStream.getKeyField().name().equalsIgnoreCase("TEST1.COL1")); + + } + +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java b/ksql-core/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java new file mode 100644 index 000000000000..6e79082a5bee --- /dev/null +++ 
b/ksql-core/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java
@@ -0,0 +1,182 @@
+package io.confluent.ksql.structured;
+
+
+import io.confluent.ksql.analyzer.AggregateAnalysis;
+import io.confluent.ksql.analyzer.AggregateAnalyzer;
+import io.confluent.ksql.analyzer.Analysis;
+import io.confluent.ksql.analyzer.AnalysisContext;
+import io.confluent.ksql.analyzer.Analyzer;
+import io.confluent.ksql.metastore.KsqlTable;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.rewrite.SqlFormatterQueryRewrite;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.planner.LogicalPlanner;
+import io.confluent.ksql.planner.plan.FilterNode;
+import io.confluent.ksql.planner.plan.PlanNode;
+import io.confluent.ksql.planner.plan.ProjectNode;
+import io.confluent.ksql.util.KsqlTestUtil;
+import io.confluent.ksql.util.SerDeUtil;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.streams.kstream.KStreamBuilder;
+import org.apache.kafka.streams.kstream.KTable;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class SchemaKTableTest {
+
+  private SchemaKTable initialSchemaKTable;
+  private static final KsqlParser KSQL_PARSER = new KsqlParser();
+
+  MetaStore metaStore;
+  KTable kTable;
+  KsqlTable ksqlTable;
+
+  @Before
+  public void init() {
+    metaStore = KsqlTestUtil.getNewMetaStore();
+    ksqlTable = (KsqlTable) metaStore.getSource("TEST2");
+    KStreamBuilder builder = new KStreamBuilder();
+    kTable = builder
+        .table(Serdes.String(), SerDeUtil.getRowSerDe(ksqlTable.getKsqlTopic().getKsqlTopicSerDe(), null), ksqlTable.getKsqlTopic().getKafkaTopicName(),
+               ksqlTable.getStateStoreName());
+
+  }
+
+  private Analysis analyze(String queryStr) {
+    List statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    System.out.println(SqlFormatterQueryRewrite.formatSql(statements.get(0))
+                           .replace("\n", " "));
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    return analysis;
+  }
+
+  private PlanNode buildLogicalPlan(String queryStr) {
+    List statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    AggregateAnalysis aggregateAnalysis = new AggregateAnalysis();
+    AggregateAnalyzer aggregateAnalyzer =
+        new AggregateAnalyzer(aggregateAnalysis, metaStore, analysis);
+    for (Expression expression: analysis.getSelectExpressions()) {
+      aggregateAnalyzer.process(expression, new AnalysisContext(null, null));
+    }
+    // Build a logical plan
+    PlanNode logicalPlan = new LogicalPlanner(analysis, aggregateAnalysis).buildPlan();
+    return logicalPlan;
+  }
+
+  @Test
+  public void testSelectSchemaKStream() throws Exception {
+    String selectQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;";
+    PlanNode logicalPlan = buildLogicalPlan(selectQuery);
+    ProjectNode projectNode = (ProjectNode) logicalPlan.getSources().get(0);
+    initialSchemaKTable = new
SchemaKTable(logicalPlan.getTheSourceNode().getSchema(), + kTable, + ksqlTable.getKeyField(), new ArrayList<>(), + false, + SchemaKStream.Type.SOURCE); + SchemaKTable projectedSchemaKStream = initialSchemaKTable + .select(projectNode.getProjectNameExpressionPairList()); + Assert.assertTrue(projectedSchemaKStream.getSchema().fields().size() == 3); + Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL0") == + projectedSchemaKStream.getSchema().fields().get(0)); + Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL2") == + projectedSchemaKStream.getSchema().fields().get(1)); + Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL3") == + projectedSchemaKStream.getSchema().fields().get(2)); + + Assert.assertTrue(projectedSchemaKStream.getSchema() + .field("COL0").schema() == Schema.INT64_SCHEMA); + Assert.assertTrue(projectedSchemaKStream.getSchema() + .field("COL2").schema() == Schema.STRING_SCHEMA); + Assert.assertTrue(projectedSchemaKStream.getSchema() + .field("COL3").schema() == Schema.FLOAT64_SCHEMA); + + Assert.assertTrue(projectedSchemaKStream.getSourceSchemaKStreams().get(0) == + initialSchemaKTable); + } + + + @Test + public void testSelectWithExpression() throws Exception { + String selectQuery = "SELECT col0, LEN(UCASE(col2)), col3*3+5 FROM test1 WHERE col0 > 100;"; + PlanNode logicalPlan = buildLogicalPlan(selectQuery); + ProjectNode projectNode = (ProjectNode) logicalPlan.getSources().get(0); + initialSchemaKTable = new SchemaKTable(logicalPlan.getTheSourceNode().getSchema(), + kTable, + ksqlTable.getKeyField(), + new ArrayList<>(), false, + SchemaKStream.Type.SOURCE); + SchemaKTable projectedSchemaKStream = initialSchemaKTable + .select(projectNode.getProjectNameExpressionPairList()); + Assert.assertTrue(projectedSchemaKStream.getSchema().fields().size() == 3); + Assert.assertTrue(projectedSchemaKStream.getSchema().field("COL0") == + projectedSchemaKStream.getSchema().fields().get(0)); + Assert.assertTrue(projectedSchemaKStream.getSchema() + .field("KSQL_COL_1") == + projectedSchemaKStream.getSchema().fields().get(1)); + Assert.assertTrue(projectedSchemaKStream.getSchema() + .field("KSQL_COL_2") == + projectedSchemaKStream.getSchema().fields().get(2)); + + Assert.assertTrue(projectedSchemaKStream.getSchema() + .field("COL0").schema() == Schema.INT64_SCHEMA); + Assert.assertTrue(projectedSchemaKStream.getSchema().fields().get(1).schema() == Schema + .INT32_SCHEMA); + Assert.assertTrue(projectedSchemaKStream.getSchema().fields().get(2).schema() == Schema + .FLOAT64_SCHEMA); + + Assert.assertTrue(projectedSchemaKStream.getSourceSchemaKStreams().get(0) == + initialSchemaKTable); + } + + @Test + public void testFilter() throws Exception { + String selectQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;"; + PlanNode logicalPlan = buildLogicalPlan(selectQuery); + FilterNode filterNode = (FilterNode) logicalPlan.getSources().get(0).getSources().get(0); + + initialSchemaKTable = new SchemaKTable(logicalPlan.getTheSourceNode().getSchema(), + kTable, + ksqlTable.getKeyField(), new ArrayList<>(), + false, + SchemaKStream.Type.SOURCE); + SchemaKTable filteredSchemaKStream = initialSchemaKTable.filter(filterNode.getPredicate()); + + Assert.assertTrue(filteredSchemaKStream.getSchema().fields().size() == 6); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL0") == + filteredSchemaKStream.getSchema().fields().get(0)); + Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL1") == + 
filteredSchemaKStream.getSchema().fields().get(1));
+    Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL2") ==
+        filteredSchemaKStream.getSchema().fields().get(2));
+    Assert.assertTrue(filteredSchemaKStream.getSchema().field("TEST1.COL3") ==
+        filteredSchemaKStream.getSchema().fields().get(3));
+
+    Assert.assertTrue(filteredSchemaKStream.getSchema()
+        .field("TEST1.COL0").schema() == Schema.INT64_SCHEMA);
+    Assert.assertTrue(filteredSchemaKStream.getSchema()
+        .field("TEST1.COL1").schema() == Schema.STRING_SCHEMA);
+    Assert.assertTrue(filteredSchemaKStream.getSchema()
+        .field("TEST1.COL2").schema() == Schema.STRING_SCHEMA);
+    Assert.assertTrue(filteredSchemaKStream.getSchema()
+        .field("TEST1.COL3").schema() == Schema.FLOAT64_SCHEMA);
+
+    Assert.assertTrue(filteredSchemaKStream.getSourceSchemaKStreams().get(0) ==
+        initialSchemaKTable);
+  }
+
+}
diff --git a/ksql-core/src/test/java/io/confluent/ksql/structured/SqlPredicateTest.java b/ksql-core/src/test/java/io/confluent/ksql/structured/SqlPredicateTest.java
new file mode 100644
index 000000000000..43c06e601bee
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/structured/SqlPredicateTest.java
@@ -0,0 +1,118 @@
+package io.confluent.ksql.structured;
+
+
+import io.confluent.ksql.analyzer.AggregateAnalysis;
+import io.confluent.ksql.analyzer.AggregateAnalyzer;
+import io.confluent.ksql.analyzer.Analysis;
+import io.confluent.ksql.analyzer.AnalysisContext;
+import io.confluent.ksql.analyzer.Analyzer;
+import io.confluent.ksql.metastore.KsqlStream;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.rewrite.SqlFormatterQueryRewrite;
+import io.confluent.ksql.parser.tree.Expression;
+import io.confluent.ksql.parser.tree.Statement;
+import io.confluent.ksql.planner.LogicalPlanner;
+import io.confluent.ksql.planner.plan.FilterNode;
+import io.confluent.ksql.planner.plan.PlanNode;
+import io.confluent.ksql.util.KsqlTestUtil;
+import io.confluent.ksql.util.SerDeUtil;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.streams.kstream.KStream;
+import org.apache.kafka.streams.kstream.KStreamBuilder;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class SqlPredicateTest {
+  private SchemaKStream initialSchemaKStream;
+  private static final KsqlParser KSQL_PARSER = new KsqlParser();
+
+  MetaStore metaStore;
+  KStream kStream;
+  KsqlStream ksqlStream;
+
+  @Before
+  public void init() {
+    metaStore = KsqlTestUtil.getNewMetaStore();
+    ksqlStream = (KsqlStream) metaStore.getSource("TEST1");
+    KStreamBuilder builder = new KStreamBuilder();
+    kStream = builder.stream(Serdes.String(),
+        SerDeUtil.getRowSerDe(ksqlStream.getKsqlTopic().getKsqlTopicSerDe(), null),
+        ksqlStream.getKsqlTopic().getKafkaTopicName());
+  }
+
+  private Analysis analyze(String queryStr) {
+    List<Statement> statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    System.out.println(SqlFormatterQueryRewrite.formatSql(statements.get(0)).replace("\n", " "));
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    return analysis;
+  }
+
+  private PlanNode buildLogicalPlan(String queryStr) {
+    List<Statement> statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    AggregateAnalysis aggregateAnalysis = new AggregateAnalysis();
+    AggregateAnalyzer aggregateAnalyzer = new AggregateAnalyzer(aggregateAnalysis,
+        metaStore, analysis);
+    for (Expression expression: analysis.getSelectExpressions()) {
+      aggregateAnalyzer.process(expression, new AnalysisContext(null, null));
+    }
+    // Build a logical plan
+    PlanNode logicalPlan = new LogicalPlanner(analysis, aggregateAnalysis).buildPlan();
+    return logicalPlan;
+  }
+
+  @Test
+  public void testFilter() throws Exception {
+    String selectQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100;";
+    PlanNode logicalPlan = buildLogicalPlan(selectQuery);
+    FilterNode filterNode = (FilterNode) logicalPlan.getSources().get(0).getSources().get(0);
+
+    initialSchemaKStream = new SchemaKStream(logicalPlan.getTheSourceNode().getSchema(),
+        kStream,
+        ksqlStream.getKeyField(), new ArrayList<>(),
+        SchemaKStream.Type.SOURCE);
+    SqlPredicate predicate = new SqlPredicate(filterNode.getPredicate(), initialSchemaKStream
+        .getSchema(), false);
+
+    Assert.assertTrue(predicate.getFilterExpression()
+        .toString().equalsIgnoreCase("(TEST1.COL0 > 100)"));
+    Assert.assertTrue(predicate.getColumnIndexes().length == 1);
+
+  }
+
+  @Test
+  public void testFilterBiggerExpression() throws Exception {
+    String selectQuery = "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100 AND LEN(col2) = 5;";
+    PlanNode logicalPlan = buildLogicalPlan(selectQuery);
+    FilterNode filterNode = (FilterNode) logicalPlan.getSources().get(0).getSources().get(0);
+
+    initialSchemaKStream = new SchemaKStream(logicalPlan.getTheSourceNode().getSchema(),
+        kStream,
+        ksqlStream.getKeyField(), new ArrayList<>(),
+        SchemaKStream.Type.SOURCE);
+    SqlPredicate predicate = new SqlPredicate(filterNode.getPredicate(), initialSchemaKStream
+        .getSchema(), false);
+
+    Assert.assertTrue(predicate
+        .getFilterExpression()
+        .toString()
+        .equalsIgnoreCase("((TEST1.COL0 > 100) AND" +
+            " (LEN(TEST1.COL2) = 5))"));
+    Assert.assertTrue(predicate.getColumnIndexes().length == 3);
+
+  }
+
+}
diff --git a/ksql-core/src/test/java/io/confluent/ksql/testutils/EmbeddedSingleNodeKafkaCluster.java b/ksql-core/src/test/java/io/confluent/ksql/testutils/EmbeddedSingleNodeKafkaCluster.java
new file mode 100644
index 000000000000..c822112e5dbe
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/testutils/EmbeddedSingleNodeKafkaCluster.java
@@ -0,0 +1,150 @@
+package io.confluent.ksql.testutils;
+
+import kafka.server.KafkaConfig$;
+import org.junit.rules.ExternalResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Properties;
+
+/**
+ * Runs an in-memory, "embedded" Kafka cluster with 1 ZooKeeper instance and 1 Kafka broker.
+ */
+public class EmbeddedSingleNodeKafkaCluster extends ExternalResource {
+
+  private static final Logger log = LoggerFactory.getLogger(EmbeddedSingleNodeKafkaCluster.class);
+  private static final int DEFAULT_BROKER_PORT = 0; // 0 results in a random port being selected
+
+  private ZooKeeperEmbedded zookeeper;
+  private KafkaEmbedded broker;
+  private final Properties brokerConfig;
+
+  /**
+   * Creates and starts a Kafka cluster.
+   */
+  public EmbeddedSingleNodeKafkaCluster() {
+    this(new Properties());
+  }
+
+  /**
+   * Creates and starts a Kafka cluster.
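+   *
+   * <p>Because the class extends JUnit's {@code ExternalResource}, a test typically declares it
+   * as a rule so that startup and shutdown are tied to the test lifecycle. A usage sketch (the
+   * field and topic names here are illustrative assumptions, not part of this patch):
+   * <pre>{@code
+   * @ClassRule
+   * public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster();
+   *
+   * @BeforeClass
+   * public static void createTopics() {
+   *   CLUSTER.createTopic("test-topic");
+   * }
+   * }</pre>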
+ * + * @param brokerConfig Additional broker configuration settings. + */ + public EmbeddedSingleNodeKafkaCluster(Properties brokerConfig) { + this.brokerConfig = new Properties(); + this.brokerConfig.putAll(brokerConfig); + } + + /** + * Creates and starts a Kafka cluster. + */ + public void start() throws Exception { + log.debug("Initiating embedded Kafka cluster startup"); + log.debug("Starting a ZooKeeper instance..."); + zookeeper = new ZooKeeperEmbedded(); + log.debug("ZooKeeper instance is running at {}", zookeeper.connectString()); + + Properties effectiveBrokerConfig = effectiveBrokerConfigFrom(brokerConfig, zookeeper); + log.debug("Starting a Kafka instance on port {} ...", + effectiveBrokerConfig.getProperty(KafkaConfig$.MODULE$.PortProp())); + broker = new KafkaEmbedded(effectiveBrokerConfig); + log.debug("Kafka instance is running at {}, connected to ZooKeeper at {}", + broker.brokerList(), broker.zookeeperConnect()); + } + + private Properties effectiveBrokerConfigFrom(Properties brokerConfig, ZooKeeperEmbedded zookeeper) { + Properties effectiveConfig = new Properties(); + effectiveConfig.putAll(brokerConfig); + effectiveConfig.put(KafkaConfig$.MODULE$.ZkConnectProp(), zookeeper.connectString()); + effectiveConfig.put(KafkaConfig$.MODULE$.PortProp(), DEFAULT_BROKER_PORT); + effectiveConfig.put(KafkaConfig$.MODULE$.DeleteTopicEnableProp(), true); + effectiveConfig.put(KafkaConfig$.MODULE$.LogCleanerDedupeBufferSizeProp(), 2 * 1024 * 1024L); + effectiveConfig.put(KafkaConfig$.MODULE$.OffsetsTopicReplicationFactorProp(), (short) 1); + return effectiveConfig; + } + + @Override + protected void before() throws Exception { + start(); + } + + @Override + protected void after() { + stop(); + } + + /** + * Stop the Kafka cluster. + */ + public void stop() { + + if (broker != null) { + broker.stop(); + } + try { + if (zookeeper != null) { + zookeeper.stop(); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * This cluster's `bootstrap.servers` value. Example: `127.0.0.1:9092`. + * + * You can use this to tell Kafka producers how to connect to this cluster. + */ + public String bootstrapServers() { + return broker.brokerList(); + } + + /** + * This cluster's ZK connection string aka `zookeeper.connect` in `hostnameOrIp:port` format. + * Example: `127.0.0.1:2181`. + * + * You can use this to e.g. tell Kafka consumers how to connect to this cluster. + */ + public String zookeeperConnect() { + return zookeeper.connectString(); + } + + + /** + * Create a Kafka topic with 1 partition and a replication factor of 1. + * + * @param topic The name of the topic. + */ + public void createTopic(String topic) { + createTopic(topic, 1, 1, new Properties()); + } + + /** + * Create a Kafka topic with the given parameters. + * + * @param topic The name of the topic. + * @param partitions The number of partitions for this topic. + * @param replication The replication factor for (the partitions of) this topic. + */ + public void createTopic(String topic, int partitions, int replication) { + createTopic(topic, partitions, replication, new Properties()); + } + + /** + * Create a Kafka topic with the given parameters. + * + * @param topic The name of the topic. + * @param partitions The number of partitions for this topic. + * @param replication The replication factor for (partitions of) this topic. + * @param topicConfig Additional topic-level configuration settings. 
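+   *
+   * <p>For example, to create a compacted topic (a sketch; the topic name and setting shown are
+   * illustrative):
+   * <pre>{@code
+   * Properties topicConfig = new Properties();
+   * topicConfig.put("cleanup.policy", "compact");
+   * cluster.createTopic("users", 1, 1, topicConfig);
+   * }</pre>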
+   */
+  public void createTopic(String topic,
+                          int partitions,
+                          int replication,
+                          Properties topicConfig) {
+    broker.createTopic(topic, partitions, replication, topicConfig);
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-core/src/test/java/io/confluent/ksql/testutils/IntegrationTestUtils.java b/ksql-core/src/test/java/io/confluent/ksql/testutils/IntegrationTestUtils.java
new file mode 100644
index 000000000000..743e24ab659c
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/testutils/IntegrationTestUtils.java
@@ -0,0 +1,198 @@
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.confluent.ksql.testutils;
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.streams.KeyValue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.stream.Collectors;
+
+/**
+ * Utility functions to make integration testing more convenient.
+ */
+public class IntegrationTestUtils {
+
+  private static final int UNLIMITED_MESSAGES = -1;
+  public static final long DEFAULT_TIMEOUT = 30 * 1000L;
+
+  /**
+   * Returns up to `maxMessages` message-values from the topic.
+   *
+   * @param topic          Kafka topic to read messages from
+   * @param consumerConfig Kafka consumer configuration
+   * @param maxMessages    Maximum number of messages to read via the consumer.
+   * @return The values retrieved via the consumer.
+   */
+  public static <K, V> List<V> readValues(String topic, Properties consumerConfig, int maxMessages) {
+    List<KeyValue<K, V>> kvs = readKeyValues(topic, consumerConfig, maxMessages);
+    return kvs.stream().map(kv -> kv.value).collect(Collectors.toList());
+  }
+
+  /**
+   * Returns as many messages as possible from the topic until a (currently hardcoded) timeout is
+   * reached.
+   *
+   * @param topic          Kafka topic to read messages from
+   * @param consumerConfig Kafka consumer configuration
+   * @return The KeyValue elements retrieved via the consumer.
+   */
+  public static <K, V> List<KeyValue<K, V>> readKeyValues(String topic, Properties consumerConfig) {
+    return readKeyValues(topic, consumerConfig, UNLIMITED_MESSAGES);
+  }
+
+  /**
+   * Returns up to `maxMessages` key-value messages read from the given topic, using a consumer
+   * built from the supplied configuration.
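+   *
+   * <p>A minimal consumer configuration for reading from the embedded cluster might look like
+   * this (a sketch; the group id, topic, and String deserializers are assumptions for
+   * illustration):
+   * <pre>{@code
+   * Properties config = new Properties();
+   * config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers());
+   * config.put(ConsumerConfig.GROUP_ID_CONFIG, "integration-test-consumer");
+   * config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+   * config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+   * config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+   * List<KeyValue<String, String>> rows = IntegrationTestUtils.readKeyValues("test-topic", config, 10);
+   * }</pre>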
+   *
+   * @param topic          Kafka topic to read messages from
+   * @param consumerConfig Kafka consumer configuration
+   * @param maxMessages    Maximum number of messages to read via the consumer
+   * @return The KeyValue elements retrieved via the consumer
+   */
+  public static <K, V> List<KeyValue<K, V>> readKeyValues(String topic, Properties consumerConfig, int maxMessages) {
+    KafkaConsumer<K, V> consumer = new KafkaConsumer<>(consumerConfig);
+    consumer.subscribe(Collections.singletonList(topic));
+    int pollIntervalMs = 100;
+    int maxTotalPollTimeMs = 2000;
+    int totalPollTimeMs = 0;
+    List<KeyValue<K, V>> consumedValues = new ArrayList<>();
+    while (totalPollTimeMs < maxTotalPollTimeMs && continueConsuming(consumedValues.size(), maxMessages)) {
+      totalPollTimeMs += pollIntervalMs;
+      ConsumerRecords<K, V> records = consumer.poll(pollIntervalMs);
+      for (ConsumerRecord<K, V> record : records) {
+        consumedValues.add(new KeyValue<>(record.key(), record.value()));
+      }
+    }
+    consumer.close();
+    return consumedValues;
+  }
+
+  private static boolean continueConsuming(int messagesConsumed, int maxMessages) {
+    return maxMessages <= 0 || messagesConsumed < maxMessages;
+  }
+
+  /**
+   * @param topic          Kafka topic to write the data records to
+   * @param records        Data records to write to Kafka
+   * @param producerConfig Kafka producer configuration
+   * @param <K>            Key type of the data records
+   * @param <V>            Value type of the data records
+   */
+  public static <K, V> void produceKeyValuesSynchronously(
+      String topic, Collection<KeyValue<K, V>> records, Properties producerConfig)
+      throws ExecutionException, InterruptedException {
+    Producer<K, V> producer = new KafkaProducer<>(producerConfig);
+    for (KeyValue<K, V> record : records) {
+      Future<RecordMetadata> f = producer.send(
+          new ProducerRecord<>(topic, record.key, record.value));
+      f.get();
+    }
+    producer.flush();
+    producer.close();
+  }
+
+  public static <V> void produceValuesSynchronously(
+      String topic, Collection<V> records, Properties producerConfig)
+      throws ExecutionException, InterruptedException {
+    Collection<KeyValue<Object, V>> keyedRecords =
+        records.stream().map(record -> new KeyValue<>(null, record)).collect(Collectors.toList());
+    produceKeyValuesSynchronously(topic, keyedRecords, producerConfig);
+  }
+
+  public static <K, V> List<KeyValue<K, V>> waitUntilMinKeyValueRecordsReceived(Properties consumerConfig,
+                                                                                String topic,
+                                                                                int expectedNumRecords) throws InterruptedException {
+
+    return waitUntilMinKeyValueRecordsReceived(consumerConfig, topic, expectedNumRecords, DEFAULT_TIMEOUT);
+  }
+
+  /**
+   * Wait until enough data (key-value records) has been consumed.
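+   *
+   * <p>Sketch: block until at least three records have arrived on the output topic, failing
+   * after 30 seconds (the topic name is an illustrative assumption):
+   * <pre>{@code
+   * List<KeyValue<String, GenericRow>> rows =
+   *     IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, "output-topic", 3, 30 * 1000L);
+   * }</pre>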
+   * @param consumerConfig     Kafka Consumer configuration
+   * @param topic              Topic to consume from
+   * @param expectedNumRecords Minimum number of expected records
+   * @param waitTime           Upper bound on the wait time, in milliseconds
+   * @return All the records consumed
+   * @throws InterruptedException
+   * @throws AssertionError if the wait time elapses before enough records are received
+   */
+  public static <K, V> List<KeyValue<K, V>> waitUntilMinKeyValueRecordsReceived(Properties consumerConfig,
+                                                                                String topic,
+                                                                                int expectedNumRecords,
+                                                                                long waitTime) throws InterruptedException {
+    List<KeyValue<K, V>> accumData = new ArrayList<>();
+    long startTime = System.currentTimeMillis();
+    while (true) {
+      List<KeyValue<K, V>> readData = readKeyValues(topic, consumerConfig);
+      accumData.addAll(readData);
+      if (accumData.size() >= expectedNumRecords)
+        return accumData;
+      if (System.currentTimeMillis() > startTime + waitTime)
+        throw new AssertionError("Expected " + expectedNumRecords +
+            " but received only " + accumData.size() +
+            " records before timeout " + waitTime + " ms");
+      Thread.sleep(Math.min(waitTime, 100L));
+    }
+  }
+
+  public static <V> List<V> waitUntilMinValuesRecordsReceived(Properties consumerConfig,
+                                                              String topic,
+                                                              int expectedNumRecords) throws InterruptedException {
+
+    return waitUntilMinValuesRecordsReceived(consumerConfig, topic, expectedNumRecords, DEFAULT_TIMEOUT);
+  }
+
+  /**
+   * Wait until enough data (value records) has been consumed.
+   * @param consumerConfig     Kafka Consumer configuration
+   * @param topic              Topic to consume from
+   * @param expectedNumRecords Minimum number of expected records
+   * @param waitTime           Upper bound on the wait time, in milliseconds
+   * @return All the records consumed
+   * @throws InterruptedException
+   * @throws AssertionError if the wait time elapses before enough records are received
+   */
+  public static <V> List<V> waitUntilMinValuesRecordsReceived(Properties consumerConfig,
+                                                              String topic,
+                                                              int expectedNumRecords,
+                                                              long waitTime) throws InterruptedException {
+    List<V> accumData = new ArrayList<>();
+    long startTime = System.currentTimeMillis();
+    while (true) {
+      List<V> readData = readValues(topic, consumerConfig, expectedNumRecords);
+      accumData.addAll(readData);
+      if (accumData.size() >= expectedNumRecords)
+        return accumData;
+      if (System.currentTimeMillis() > startTime + waitTime)
+        throw new AssertionError("Expected " + expectedNumRecords +
+            " but received only " + accumData.size() +
+            " records before timeout " + waitTime + " ms");
+      Thread.sleep(Math.min(waitTime, 100L));
+    }
+  }
+}
diff --git a/ksql-core/src/test/java/io/confluent/ksql/testutils/KafkaEmbedded.java b/ksql-core/src/test/java/io/confluent/ksql/testutils/KafkaEmbedded.java
new file mode 100644
index 000000000000..19708c3ead3d
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/testutils/KafkaEmbedded.java
@@ -0,0 +1,163 @@
+package io.confluent.ksql.testutils;
+
+import kafka.admin.AdminUtils;
+import kafka.admin.RackAwareMode;
+import kafka.server.KafkaConfig;
+import kafka.server.KafkaConfig$;
+import kafka.server.KafkaServer;
+import kafka.utils.TestUtils;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.ZkConnection;
+import org.apache.kafka.common.network.ListenerName;
+import org.apache.kafka.common.utils.SystemTime;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+
+/**
+ * Runs an in-memory, "embedded" instance of a Kafka broker, which listens at `127.0.0.1:9092` by
+ * default.
"embedded" instance of a Kafka broker, which listens at `127.0.0.1:9092` by + * default. + * + * Requires a running ZooKeeper instance to connect to. By default, it expects a ZooKeeper instance + * running at `127.0.0.1:2181`. You can specify a different ZooKeeper instance by setting the + * `zookeeper.connect` parameter in the broker's configuration. + */ +public class KafkaEmbedded { + + private static final Logger log = LoggerFactory.getLogger(KafkaEmbedded.class); + + private static final String DEFAULT_ZK_CONNECT = "127.0.0.1:2181"; + private static final int DEFAULT_ZK_SESSION_TIMEOUT_MS = 10 * 1000; + private static final int DEFAULT_ZK_CONNECTION_TIMEOUT_MS = 8 * 1000; + + private final Properties effectiveConfig; + private final File logDir; + private final TemporaryFolder tmpFolder; + private final KafkaServer kafka; + + /** + * Creates and starts an embedded Kafka broker. + * + * @param config Broker configuration settings. Used to modify, for example, on which port the + * broker should listen to. Note that you cannot change some settings such as + * `log.dirs`, `port`. + */ + public KafkaEmbedded(Properties config) throws IOException { + tmpFolder = new TemporaryFolder(); + tmpFolder.create(); + logDir = tmpFolder.newFolder(); + effectiveConfig = effectiveConfigFrom(config); + boolean loggingEnabled = true; + + KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, loggingEnabled); + log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...", + logDir, zookeeperConnect()); + kafka = TestUtils.createServer(kafkaConfig, new SystemTime()); + log.debug("Startup of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...", + brokerList(), zookeeperConnect()); + } + + private Properties effectiveConfigFrom(Properties initialConfig) throws IOException { + Properties effectiveConfig = new Properties(); + effectiveConfig.put(KafkaConfig$.MODULE$.BrokerIdProp(), 0); + effectiveConfig.put(KafkaConfig$.MODULE$.HostNameProp(), "127.0.0.1"); + effectiveConfig.put(KafkaConfig$.MODULE$.PortProp(), "9092"); + effectiveConfig.put(KafkaConfig$.MODULE$.NumPartitionsProp(), 1); + effectiveConfig.put(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp(), true); + effectiveConfig.put(KafkaConfig$.MODULE$.MessageMaxBytesProp(), 1000000); + effectiveConfig.put(KafkaConfig$.MODULE$.ControlledShutdownEnableProp(), true); + + effectiveConfig.putAll(initialConfig); + effectiveConfig.setProperty(KafkaConfig$.MODULE$.LogDirProp(), logDir.getAbsolutePath()); + return effectiveConfig; + } + + /** + * This broker's `metadata.broker.list` value. Example: `127.0.0.1:9092`. + * + * You can use this to tell Kafka producers and consumers how to connect to this instance. + */ + public String brokerList() { + ListenerName listenerName = kafka.config().advertisedListeners().apply(0).listenerName(); + return String.join(":", kafka.config().hostName(), Integer.toString(kafka.boundPort + (listenerName))); + } + + + /** + * The ZooKeeper connection string aka `zookeeper.connect`. + */ + public String zookeeperConnect() { + return effectiveConfig.getProperty("zookeeper.connect", DEFAULT_ZK_CONNECT); + } + + /** + * Stop the broker. 
+   */
+  public void stop() {
+    log.debug("Shutting down embedded Kafka broker at {} (with ZK ensemble at {}) ...",
+        brokerList(), zookeeperConnect());
+    kafka.shutdown();
+    kafka.awaitShutdown();
+    log.debug("Removing temp folder {} with logs.dir at {} ...", tmpFolder, logDir);
+    tmpFolder.delete();
+    log.debug("Shutdown of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
+        brokerList(), zookeeperConnect());
+  }
+
+  /**
+   * Create a Kafka topic with 1 partition and a replication factor of 1.
+   *
+   * @param topic The name of the topic.
+   */
+  public void createTopic(String topic) {
+    createTopic(topic, 1, 1, new Properties());
+  }
+
+  /**
+   * Create a Kafka topic with the given parameters.
+   *
+   * @param topic       The name of the topic.
+   * @param partitions  The number of partitions for this topic.
+   * @param replication The replication factor for (the partitions of) this topic.
+   */
+  public void createTopic(String topic, int partitions, int replication) {
+    createTopic(topic, partitions, replication, new Properties());
+  }
+
+  /**
+   * Create a Kafka topic with the given parameters.
+   *
+   * @param topic       The name of the topic.
+   * @param partitions  The number of partitions for this topic.
+   * @param replication The replication factor for (partitions of) this topic.
+   * @param topicConfig Additional topic-level configuration settings.
+   */
+  public void createTopic(String topic,
+                          int partitions,
+                          int replication,
+                          Properties topicConfig) {
+    log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
+        topic, partitions, replication, topicConfig);
+    // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
+    // createTopic() will only seem to work (it will return without error). The topic will exist
+    // only in ZooKeeper and will be returned when listing topics, but Kafka itself does not
+    // create the topic.
+    ZkClient zkClient = new ZkClient(
+        zookeeperConnect(),
+        DEFAULT_ZK_SESSION_TIMEOUT_MS,
+        DEFAULT_ZK_CONNECTION_TIMEOUT_MS,
+        ZKStringSerializer$.MODULE$);
+    boolean isSecure = false;
+    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure);
+    AdminUtils.createTopic(zkUtils, topic, partitions, replication, topicConfig, RackAwareMode.Enforced$.MODULE$);
+    zkClient.close();
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-core/src/test/java/io/confluent/ksql/testutils/ZooKeeperEmbedded.java b/ksql-core/src/test/java/io/confluent/ksql/testutils/ZooKeeperEmbedded.java
new file mode 100644
index 000000000000..4c0a8dc255ea
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/testutils/ZooKeeperEmbedded.java
@@ -0,0 +1,56 @@
+package io.confluent.ksql.testutils;
+
+import org.apache.curator.test.TestingServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * Runs an in-memory, "embedded" instance of a ZooKeeper server.
+ *
+ * The ZooKeeper server instance is automatically started when you create a new instance of this class.
+ */
+public class ZooKeeperEmbedded {
+
+  private static final Logger log = LoggerFactory.getLogger(ZooKeeperEmbedded.class);
+
+  private final TestingServer server;
+
+  /**
+   * Creates and starts a ZooKeeper instance.
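+   *
+   * <p>Sketch of wiring an embedded broker to this instance (variable names are illustrative):
+   * <pre>{@code
+   * ZooKeeperEmbedded zookeeper = new ZooKeeperEmbedded();
+   * Properties brokerConfig = new Properties();
+   * brokerConfig.put("zookeeper.connect", zookeeper.connectString());
+   * KafkaEmbedded broker = new KafkaEmbedded(brokerConfig);
+   * }</pre>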
+   *
+   * @throws Exception
+   */
+  public ZooKeeperEmbedded() throws Exception {
+    log.debug("Starting embedded ZooKeeper server...");
+    this.server = new TestingServer();
+    log.debug("Embedded ZooKeeper server at {} uses the temp directory at {}",
+        server.getConnectString(), server.getTempDirectory());
+  }
+
+  public void stop() throws IOException {
+    log.debug("Shutting down embedded ZooKeeper server at {} ...", server.getConnectString());
+    server.close();
+    log.debug("Shutdown of embedded ZooKeeper server at {} completed", server.getConnectString());
+  }
+
+  /**
+   * The ZooKeeper connection string aka `zookeeper.connect` in `hostnameOrIp:port` format.
+   * Example: `127.0.0.1:2181`.
+   *
+   * You can use this to e.g. tell Kafka brokers how to connect to this instance.
+   */
+  public String connectString() {
+    return server.getConnectString();
+  }
+
+  /**
+   * The hostname of the ZooKeeper instance. Example: `127.0.0.1`
+   */
+  public String hostname() {
+    // "server:1:2:3" -> "server:1:2"
+    return connectString().substring(0, connectString().lastIndexOf(':'));
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/ExpressionTypeManagerTest.java b/ksql-core/src/test/java/io/confluent/ksql/util/ExpressionTypeManagerTest.java
new file mode 100644
index 000000000000..4ab3670fcda8
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/util/ExpressionTypeManagerTest.java
@@ -0,0 +1,106 @@
+package io.confluent.ksql.util;
+
+
+import io.confluent.ksql.analyzer.Analysis;
+import io.confluent.ksql.analyzer.AnalysisContext;
+import io.confluent.ksql.analyzer.Analyzer;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.tree.Statement;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+public class ExpressionTypeManagerTest {
+
+  private static final KsqlParser KSQL_PARSER = new KsqlParser();
+  private MetaStore metaStore;
+  private Schema schema;
+
+  @Before
+  public void init() {
+    metaStore = KsqlTestUtil.getNewMetaStore();
+    schema = SchemaBuilder.struct()
+        .field("TEST1.COL0", SchemaBuilder.INT64_SCHEMA)
+        .field("TEST1.COL1", SchemaBuilder.STRING_SCHEMA)
+        .field("TEST1.COL2", SchemaBuilder.STRING_SCHEMA)
+        .field("TEST1.COL3", SchemaBuilder.FLOAT64_SCHEMA);
+  }
+
+  private Analysis analyzeQuery(String queryStr) {
+    List<Statement> statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    return analysis;
+  }
+
+  @Test
+  public void testArithmaticExpr() throws Exception {
+    String simpleQuery = "SELECT col0+col3, col2, col3+10, col0+10, col0*25 FROM test1 WHERE col0 > 100;";
+    Analysis analysis = analyzeQuery(simpleQuery);
+    ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(schema);
+    Schema exprType0 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(0));
+    Schema exprType2 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(2));
+    Schema exprType3 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(3));
+    Schema exprType4 =
expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(4)); + Assert.assertTrue(exprType0 == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(exprType2 == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(exprType3 == Schema.INT64_SCHEMA); + Assert.assertTrue(exprType4 == Schema.INT64_SCHEMA); + } + + @Test + public void testComparisonExpr() throws Exception { + String simpleQuery = "SELECT col0>col3, col0*25<200, col2 = 'test' FROM test1;"; + Analysis analysis = analyzeQuery(simpleQuery); + ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(schema); + Schema exprType0 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(0)); + Schema exprType1 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(1)); + Schema exprType2 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(2)); + Assert.assertTrue(exprType0 == Schema.BOOLEAN_SCHEMA); + Assert.assertTrue(exprType1 == Schema.BOOLEAN_SCHEMA); + Assert.assertTrue(exprType2 == Schema.BOOLEAN_SCHEMA); + } + + @Test + public void testUDFExpr() throws Exception { + String simpleQuery = "SELECT FLOOR(col3), CEIL(col3*3), ABS(col0+1.34), RANDOM()+10, ROUND(col3*2)+12 FROM test1;"; + Analysis analysis = analyzeQuery(simpleQuery); + ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(schema); + Schema exprType0 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(0)); + Schema exprType1 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(1)); + Schema exprType2 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(2)); + Schema exprType3 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(3)); + Schema exprType4 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(4)); + + Assert.assertTrue(exprType0 == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(exprType1 == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(exprType2 == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(exprType3 == Schema.FLOAT64_SCHEMA); + Assert.assertTrue(exprType4 == Schema.INT64_SCHEMA); + } + + @Test + public void testStringUDFExpr() throws Exception { + String simpleQuery = "SELECT LCASE(col1), UCASE(col2), TRIM(col1), CONCAT(col1,'_test'), SUBSTRING(col1, 1, 3) FROM test1;"; + Analysis analysis = analyzeQuery(simpleQuery); + ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(schema); + Schema exprType0 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(0)); + Schema exprType1 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(1)); + Schema exprType2 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(2)); + Schema exprType3 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(3)); + Schema exprType4 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(4)); + + Assert.assertTrue(exprType0 == Schema.STRING_SCHEMA); + Assert.assertTrue(exprType1 == Schema.STRING_SCHEMA); + Assert.assertTrue(exprType2 == Schema.STRING_SCHEMA); + Assert.assertTrue(exprType3 == Schema.STRING_SCHEMA); + Assert.assertTrue(exprType4 == Schema.STRING_SCHEMA); + } +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/ExpressionUtilTest.java b/ksql-core/src/test/java/io/confluent/ksql/util/ExpressionUtilTest.java new file mode 100644 index 000000000000..3cd93ac8018a --- /dev/null +++ 
b/ksql-core/src/test/java/io/confluent/ksql/util/ExpressionUtilTest.java
@@ -0,0 +1,180 @@
+package io.confluent.ksql.util;
+
+
+import io.confluent.ksql.analyzer.Analysis;
+import io.confluent.ksql.analyzer.AnalysisContext;
+import io.confluent.ksql.analyzer.Analyzer;
+import io.confluent.ksql.metastore.MetaStore;
+import io.confluent.ksql.parser.KsqlParser;
+import io.confluent.ksql.parser.tree.Statement;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+public class ExpressionUtilTest {
+
+  private static final KsqlParser KSQL_PARSER = new KsqlParser();
+  private MetaStore metaStore;
+  private Schema schema;
+  private ExpressionUtil expressionUtil;
+
+  @Before
+  public void init() {
+    metaStore = KsqlTestUtil.getNewMetaStore();
+    schema = SchemaBuilder.struct()
+        .field("TEST1.COL0", SchemaBuilder.INT64_SCHEMA)
+        .field("TEST1.COL1", SchemaBuilder.STRING_SCHEMA)
+        .field("TEST1.COL2", SchemaBuilder.STRING_SCHEMA)
+        .field("TEST1.COL3", SchemaBuilder.FLOAT64_SCHEMA);
+    expressionUtil = new ExpressionUtil();
+  }
+
+  private Analysis analyzeQuery(String queryStr) {
+    List<Statement> statements = KSQL_PARSER.buildAst(queryStr, metaStore);
+    // Analyze the query to resolve the references and extract operations
+    Analysis analysis = new Analysis();
+    Analyzer analyzer = new Analyzer(analysis, metaStore);
+    analyzer.process(statements.get(0), new AnalysisContext(null, null));
+    return analysis;
+  }
+
+  @Test
+  public void testArithmaticExpr() throws Exception {
+    String simpleQuery = "SELECT col0+col3, col2, col3+10, col0*25, 12*4+2 FROM test1 WHERE col0 > 100;";
+    Analysis analysis = analyzeQuery(simpleQuery);
+
+    ExpressionMetadata expressionEvaluatorMetadata0 = expressionUtil.getExpressionEvaluator(
+        analysis.getSelectExpressions().get(0), schema);
+    Assert.assertTrue(expressionEvaluatorMetadata0.getIndexes().length == 2);
+    Assert.assertTrue(expressionEvaluatorMetadata0.getIndexes()[0] == 3);
+    Assert.assertTrue(expressionEvaluatorMetadata0.getIndexes()[1] == 0);
+    Assert.assertTrue(expressionEvaluatorMetadata0.getUdfs().length == 2);
+    Object result0 = expressionEvaluatorMetadata0.getExpressionEvaluator().evaluate(new Object[]{10.0, 5l});
+    Assert.assertTrue(result0 instanceof Double);
+    Assert.assertTrue(((Double)result0) == 15.0);
+
+    ExpressionMetadata expressionEvaluatorMetadata1 = expressionUtil.getExpressionEvaluator(
+        analysis.getSelectExpressions().get(3), schema);
+    Assert.assertTrue(expressionEvaluatorMetadata1.getIndexes().length == 1);
+    Assert.assertTrue(expressionEvaluatorMetadata1.getIndexes()[0] == 0);
+    Assert.assertTrue(expressionEvaluatorMetadata1.getUdfs().length == 1);
+    Object result1 = expressionEvaluatorMetadata1.getExpressionEvaluator().evaluate(new Object[]{5l});
+    Assert.assertTrue(result1 instanceof Long);
+    Assert.assertTrue(((Long)result1) == 125l);
+
+    ExpressionMetadata expressionEvaluatorMetadata2 = expressionUtil.getExpressionEvaluator(
+        analysis.getSelectExpressions().get(4), schema);
+    Assert.assertTrue(expressionEvaluatorMetadata2.getIndexes().length == 0);
+    Assert.assertTrue(expressionEvaluatorMetadata2.getUdfs().length == 0);
+    Object result2 = expressionEvaluatorMetadata2.getExpressionEvaluator().evaluate(new Object[]{});
+    Assert.assertTrue(result2 instanceof Long);
+    Assert.assertTrue(((Long)result2) == 50);
+  }
+
+  @Test
+  public void testUDFExpr() throws Exception {
+    String simpleQuery = "SELECT
FLOOR(col3), CEIL(col3*3), ABS(col0+1.34), RANDOM()+10, ROUND(col3*2)+12 FROM test1;"; + Analysis analysis = analyzeQuery(simpleQuery); + GenericRowValueTypeEnforcer genericRowValueTypeEnforcer = new GenericRowValueTypeEnforcer(schema); + + ExpressionMetadata expressionEvaluator0 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(0),schema); + Object argObj0 = genericRowValueTypeEnforcer.enforceFieldType(3, 1.5); + Object result0 = expressionEvaluator0.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator0.getUdfs() + [0], argObj0}); + Assert.assertTrue(argObj0 instanceof Double); + Assert.assertTrue(result0 instanceof Double); + Assert.assertTrue(((Double)result0) == 1.0); + + ExpressionMetadata expressionEvaluator1 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(1),schema); + Object argObj1 = genericRowValueTypeEnforcer.enforceFieldType(3, 1.5); + Object result1 = expressionEvaluator1.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator1.getUdfs() + [0], argObj1}); + Assert.assertTrue(argObj1 instanceof Double); + Assert.assertTrue(result1 instanceof Double); + Assert.assertTrue(((Double)result1) == 5.0); + + + ExpressionMetadata expressionEvaluator2 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(2),schema); + Object argObj2 = genericRowValueTypeEnforcer.enforceFieldType(0, 15); + Object result2 = expressionEvaluator2.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator2.getUdfs() + [0], argObj2}); + Assert.assertTrue(argObj2 instanceof Long); + Assert.assertTrue(result2 instanceof Double); + Assert.assertTrue(((Double)result2) == 16.34); + + ExpressionMetadata expressionEvaluator3 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(3),schema); + Object result3 = expressionEvaluator3.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator3.getUdfs()[0]}); + Assert.assertTrue(result3 instanceof Double); + Assert.assertTrue(((Double)result3).intValue() == 10); + + ExpressionMetadata expressionEvaluator4 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(4),schema); + Object argObj4 = genericRowValueTypeEnforcer.enforceFieldType(3, 1.5); + Object result4 = expressionEvaluator4.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator4.getUdfs() + [0], argObj4}); + Assert.assertTrue(argObj4 instanceof Double); + Assert.assertTrue(result4 instanceof Long); + Assert.assertTrue(((Long)result4) == 15); + + } + + @Test + public void testStringUDFExpr() throws Exception { + GenericRowValueTypeEnforcer genericRowValueTypeEnforcer = new GenericRowValueTypeEnforcer(schema); + String simpleQuery = "SELECT LCASE(col1), UCASE(col2), TRIM(col1), CONCAT(col1,'_test'), SUBSTRING(col1, 1, 3) FROM test1;"; + Analysis analysis = analyzeQuery(simpleQuery); + + + ExpressionMetadata expressionEvaluator0 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(0),schema); + Object argObj0 = genericRowValueTypeEnforcer.enforceFieldType(2, "Hello"); + Object result0 = expressionEvaluator0.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator0.getUdfs() + [0], argObj0}); + Assert.assertTrue(result0 instanceof String); + Assert.assertTrue(result0.equals("hello")); + + ExpressionMetadata expressionEvaluator1 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(1),schema); + Object argObj1 = genericRowValueTypeEnforcer.enforceFieldType(2, 
"Hello"); + Object result1 = expressionEvaluator1.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator1.getUdfs() + [0], argObj1}); + Assert.assertTrue(result1 instanceof String); + Assert.assertTrue(result1.equals("HELLO")); + + ExpressionMetadata expressionEvaluator2 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(2),schema); + Object argObj2 = genericRowValueTypeEnforcer.enforceFieldType(2, " Hello "); + Object result2 = expressionEvaluator2.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator2.getUdfs() + [0], argObj2}); + Assert.assertTrue(result2 instanceof String); + Assert.assertTrue(result2.equals("Hello")); + + ExpressionMetadata expressionEvaluator3 = expressionUtil.getExpressionEvaluator(analysis + .getSelectExpressions().get(3),schema); + Object argObj3 = genericRowValueTypeEnforcer.enforceFieldType(2, "Hello"); + Object result3 = expressionEvaluator3.getExpressionEvaluator().evaluate(new + Object[]{expressionEvaluator3.getUdfs() + [0], argObj3}); + Assert.assertTrue(result3 instanceof String); + Assert.assertTrue(result3.equals("Hello_test")); + + } + +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/FakeKafkaTopicClient.java b/ksql-core/src/test/java/io/confluent/ksql/util/FakeKafkaTopicClient.java new file mode 100644 index 000000000000..e0cb2d08f99a --- /dev/null +++ b/ksql-core/src/test/java/io/confluent/ksql/util/FakeKafkaTopicClient.java @@ -0,0 +1,43 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.util; + +import org.apache.kafka.clients.admin.TopicDescription; + +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +/** + * Fake Kafka Client is for test only, none of its methods should be called. + */ +public class FakeKafkaTopicClient implements KafkaTopicClient { + + @Override + public void createTopic(String topic, int numPartitions, short replicatonFactor) { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + + @Override + public boolean isTopicExists(String topic) { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + + @Override + public Set listTopicNames() { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + + @Override + public Map describeTopics(Collection topicNames) { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + + @Override + public void close() { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/KsqlTestUtil.java b/ksql-core/src/test/java/io/confluent/ksql/util/KsqlTestUtil.java new file mode 100644 index 000000000000..e10c0c6c221d --- /dev/null +++ b/ksql-core/src/test/java/io/confluent/ksql/util/KsqlTestUtil.java @@ -0,0 +1,106 @@ +package io.confluent.ksql.util; + +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.KsqlTable; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.metastore.MetaStoreImpl; +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.serde.json.KsqlJsonTopicSerDe; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.streams.kstream.Windowed; +import org.junit.Assert; + +import java.util.HashMap; +import java.util.Map; + +public class KsqlTestUtil { + + public static MetaStore getNewMetaStore() { + + MetaStore metaStore = new MetaStoreImpl(); + + 
+    SchemaBuilder schemaBuilder1 = SchemaBuilder.struct()
+        .field("COL0", SchemaBuilder.INT64_SCHEMA)
+        .field("COL1", SchemaBuilder.STRING_SCHEMA)
+        .field("COL2", SchemaBuilder.STRING_SCHEMA)
+        .field("COL3", SchemaBuilder.FLOAT64_SCHEMA)
+        .field("COL4", SchemaBuilder.array(SchemaBuilder.FLOAT64_SCHEMA))
+        .field("COL5", SchemaBuilder.map(SchemaBuilder.STRING_SCHEMA, SchemaBuilder.FLOAT64_SCHEMA));
+
+    KsqlTopic ksqlTopic1 =
+        new KsqlTopic("TEST1", "test1", new KsqlJsonTopicSerDe(null));
+
+    KsqlStream ksqlStream = new KsqlStream("TEST1", schemaBuilder1,
+        schemaBuilder1.field("COL0"), null, ksqlTopic1);
+
+    metaStore.putTopic(ksqlTopic1);
+    metaStore.putSource(ksqlStream);
+
+    SchemaBuilder schemaBuilder2 = SchemaBuilder.struct()
+        .field("COL0", SchemaBuilder.INT64_SCHEMA)
+        .field("COL1", SchemaBuilder.STRING_SCHEMA)
+        .field("COL2", SchemaBuilder.STRING_SCHEMA)
+        .field("COL3", SchemaBuilder.FLOAT64_SCHEMA)
+        .field("COL4", SchemaBuilder.BOOLEAN_SCHEMA);
+
+    KsqlTopic ksqlTopic2 =
+        new KsqlTopic("TEST2", "test2", new KsqlJsonTopicSerDe(null));
+    KsqlTable ksqlTable = new KsqlTable("TEST2", schemaBuilder2, schemaBuilder2.field("COL0"),
+        null, ksqlTopic2, "TEST2", false);
+
+    metaStore.putTopic(ksqlTopic2);
+    metaStore.putSource(ksqlTable);
+
+    SchemaBuilder schemaBuilderOrders = SchemaBuilder.struct()
+        .field("ORDERTIME", SchemaBuilder.INT64_SCHEMA)
+        .field("ORDERID", SchemaBuilder.STRING_SCHEMA)
+        .field("ITEMID", SchemaBuilder.STRING_SCHEMA)
+        .field("ORDERUNITS", SchemaBuilder.FLOAT64_SCHEMA);
+
+    KsqlTopic ksqlTopicOrders =
+        new KsqlTopic("ORDERS_TOPIC", "orders_topic", new KsqlJsonTopicSerDe(null));
+
+    KsqlStream ksqlStreamOrders = new KsqlStream("ORDERS", schemaBuilderOrders,
+        schemaBuilderOrders.field("ORDERTIME"), null, ksqlTopicOrders);
+
+    metaStore.putTopic(ksqlTopicOrders);
+    metaStore.putSource(ksqlStreamOrders);
+
+    return metaStore;
+  }
+
+  public static void assertExpectedResults(Map<String, GenericRow> actualResult,
+                                           Map<String, GenericRow> expectedResult) {
+    Assert.assertEquals(actualResult.size(), expectedResult.size());
+
+    for (String k: expectedResult.keySet()) {
+      Assert.assertTrue(actualResult.containsKey(k));
+      Assert.assertEquals(expectedResult.get(k), actualResult.get(k));
+    }
+  }
+
+  public static void assertExpectedWindowedResults(Map<Windowed<String>, GenericRow> actualResult,
+                                                   Map<Windowed<String>, GenericRow> expectedResult) {
+    Map<String, GenericRow> actualResultSimplified = new HashMap<>();
+    Map<String, GenericRow> expectedResultSimplified = new HashMap<>();
+    for (Windowed<String> k: expectedResult.keySet()) {
+      expectedResultSimplified.put(k.key(), expectedResult.get(k));
+    }
+
+    for (Windowed<String> k: actualResult.keySet()) {
+      if (actualResult.get(k) != null) {
+        actualResultSimplified.put(k.key(), actualResult.get(k));
+      }
+
+    }
+    assertExpectedResults(actualResultSimplified, expectedResultSimplified);
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/OrderDataProvider.java b/ksql-core/src/test/java/io/confluent/ksql/util/OrderDataProvider.java
new file mode 100644
index 000000000000..59991fbe09c4
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/util/OrderDataProvider.java
@@ -0,0 +1,106 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.util; + +import io.confluent.ksql.physical.GenericRow; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +public class OrderDataProvider extends TestDataProvider { + + private static final String namePrefix = + "ORDER"; + + private static final String ksqlSchemaString = + "(ORDERTIME bigint, ORDERID varchar, ITEMID varchar, ORDERUNITS double, PRICEARRAY array, KEYVALUEMAP map)"; + + private static final String key = "ORDERTIME"; + + private static final Schema schema = SchemaBuilder.struct() + .field("ORDERTIME", SchemaBuilder.INT64_SCHEMA) + .field("ORDERID", SchemaBuilder.STRING_SCHEMA) + .field("ITEMID", SchemaBuilder.STRING_SCHEMA) + .field("ORDERUNITS", SchemaBuilder.FLOAT64_SCHEMA) + .field("PRICEARRAY", SchemaBuilder.array(SchemaBuilder.FLOAT64_SCHEMA)) + .field("KEYVALUEMAP", SchemaBuilder.map(SchemaBuilder.STRING_SCHEMA, SchemaBuilder.FLOAT64_SCHEMA)).build(); + + private static final Map data = buildData(); + + public OrderDataProvider() { + super(namePrefix, ksqlSchemaString, key, schema, data); + } + + private static Map buildData() { + + Map mapField = new HashMap<>(); + mapField.put("key1", 1.0); + mapField.put("key2", 2.0); + mapField.put("key3", 3.0); + + Map dataMap = new HashMap<>(); + dataMap.put("1", new GenericRow(Arrays.asList(1, + "ORDER_1", + "ITEM_1", 10.0, new + Double[]{100.0, + 110.99, + 90.0 }, + mapField))); + dataMap.put("2", new GenericRow(Arrays.asList(2, "ORDER_2", + "ITEM_2", 20.0, new + Double[]{10.0, + 10.99, + 9.0 }, + mapField))); + + dataMap.put("3", new GenericRow(Arrays.asList(3, "ORDER_3", + "ITEM_3", 30.0, new + Double[]{10.0, + 10.99, + 91.0 }, + mapField))); + + dataMap.put("4", new GenericRow(Arrays.asList(4, "ORDER_4", + "ITEM_4", 40.0, new + Double[]{10.0, + 140.99, + 94.0 }, + mapField))); + + dataMap.put("5", new GenericRow(Arrays.asList(5, "ORDER_5", + "ITEM_5", 50.0, new + Double[]{160.0, + 160.99, + 98.0 }, + mapField))); + + dataMap.put("6", new GenericRow(Arrays.asList(6, "ORDER_6", + "ITEM_6", 60.0, new + Double[]{1000.0, + 1100.99, + 900.0 }, + mapField))); + + dataMap.put("7", new GenericRow(Arrays.asList(7, "ORDER_6", + "ITEM_7", 70.0, new + Double[]{1100.0, + 1110.99, + 190.0 }, + mapField))); + + dataMap.put("8", new GenericRow(Arrays.asList(8, "ORDER_6", + "ITEM_8", 80.0, new + Double[]{1100.0, + 1110.99, + 970.0 }, + mapField))); + + return dataMap; + } + +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/SchemaUtilTest.java b/ksql-core/src/test/java/io/confluent/ksql/util/SchemaUtilTest.java new file mode 100644 index 000000000000..be028f79a7dc --- /dev/null +++ b/ksql-core/src/test/java/io/confluent/ksql/util/SchemaUtilTest.java @@ -0,0 +1,30 @@ +package io.confluent.ksql.util; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.junit.Assert; +import org.junit.Test; + +public class SchemaUtilTest { + + @Test + public void testGetJavaType() { + Class booleanClazz = SchemaUtil.getJavaType(Schema.BOOLEAN_SCHEMA); + Class intClazz = SchemaUtil.getJavaType(Schema.INT32_SCHEMA); + Class longClazz = SchemaUtil.getJavaType(Schema.INT64_SCHEMA); + Class doubleClazz = SchemaUtil.getJavaType(Schema.FLOAT64_SCHEMA); + Class StringClazz = SchemaUtil.getJavaType(Schema.STRING_SCHEMA); + Class arrayClazz = SchemaUtil.getJavaType(SchemaBuilder.array(Schema.FLOAT64_SCHEMA)); + Class mapClazz = 
SchemaUtil.getJavaType(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.FLOAT64_SCHEMA)); + + Assert.assertTrue(booleanClazz.getCanonicalName().equals("java.lang.Boolean")); + Assert.assertTrue(intClazz.getCanonicalName().equals("java.lang.Integer")); + Assert.assertTrue(longClazz.getCanonicalName().equals("java.lang.Long")); + Assert.assertTrue(doubleClazz.getCanonicalName().equals("java.lang.Double")); + Assert.assertTrue(StringClazz.getCanonicalName().equals("java.lang.String")); + Assert.assertTrue(arrayClazz.getCanonicalName().equals("java.lang.Double[]")); + Assert.assertTrue(mapClazz.getCanonicalName().equals("java.util.HashMap")); + + } + +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/TestDataProvider.java b/ksql-core/src/test/java/io/confluent/ksql/util/TestDataProvider.java new file mode 100644 index 000000000000..5acfd5d8c0fb --- /dev/null +++ b/ksql-core/src/test/java/io/confluent/ksql/util/TestDataProvider.java @@ -0,0 +1,52 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.util; + +import io.confluent.ksql.physical.GenericRow; +import org.apache.kafka.connect.data.Schema; + +import java.util.Map; + +public abstract class TestDataProvider { + final String topicName; + final String ksqlSchemaString; + final String key; + final Schema schema; + final Map data; + final String kstreamName; + + public TestDataProvider(String namePrefix, String ksqlSchemaString, String key, Schema schema, Map data) { + this.topicName = namePrefix + "_TOPIC"; + this.kstreamName = namePrefix + "_KSTREAM"; + this.ksqlSchemaString = ksqlSchemaString; + this.key = key; + this.schema = schema; + this.data = data; + } + + public String topicName() { + return topicName; + } + + public String ksqlSchemaString() { + return ksqlSchemaString; + } + + public String key() { + return key; + } + + public Schema schema() { + return schema; + } + + public Map data() { + return data; + } + + public String kstreamName() { + return kstreamName; + } +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/TopicConsumer.java b/ksql-core/src/test/java/io/confluent/ksql/util/TopicConsumer.java new file mode 100644 index 000000000000..9215605b3db0 --- /dev/null +++ b/ksql-core/src/test/java/io/confluent/ksql/util/TopicConsumer.java @@ -0,0 +1,72 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.util; + +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.serde.json.KsqlJsonDeserializer; +import io.confluent.ksql.testutils.EmbeddedSingleNodeKafkaCluster; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.connect.data.Schema; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +public class TopicConsumer { + + public static final long RESULTS_POLL_MAX_TIME_MS = 30000; + public static final long RESULTS_EXTRA_POLL_TIME_MS = 250; + + private final EmbeddedSingleNodeKafkaCluster cluster; + + public TopicConsumer(EmbeddedSingleNodeKafkaCluster cluster) { + this.cluster = cluster; + } + + public Map readResults( + String topic, + Schema schema, + int expectedNumMessages, + Deserializer keyDeserializer + ) { + Map result = new HashMap<>(); + + Properties consumerConfig = new Properties(); + consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); + consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "filter-integration-test-standard-consumer"); + consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + + try (KafkaConsumer consumer = + new KafkaConsumer<>(consumerConfig, keyDeserializer, new KsqlJsonDeserializer(schema)) + ) { + consumer.subscribe(Collections.singleton(topic)); + long pollStart = System.currentTimeMillis(); + long pollEnd = pollStart + RESULTS_POLL_MAX_TIME_MS; + while (System.currentTimeMillis() < pollEnd && continueConsuming(result.size(), expectedNumMessages)) { + for (ConsumerRecord record : consumer.poll(Math.max(1, pollEnd - System.currentTimeMillis()))) { + if (record.value() != null) { + result.put(record.key(), record.value()); + } + } + } + + for (ConsumerRecord record : consumer.poll(RESULTS_EXTRA_POLL_TIME_MS)) { + if (record.value() != null) { + result.put(record.key(), record.value()); + } + } + } + return result; + } + + private static boolean continueConsuming(int messagesConsumed, int maxMessages) { + return maxMessages < 0 || messagesConsumed < maxMessages; + } + +} diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/TopicProducer.java b/ksql-core/src/test/java/io/confluent/ksql/util/TopicProducer.java new file mode 100644 index 000000000000..1d0ae6cb34e6 --- /dev/null +++ b/ksql-core/src/test/java/io/confluent/ksql/util/TopicProducer.java @@ -0,0 +1,76 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.util;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.serde.json.KsqlJsonSerializer;
+import io.confluent.ksql.testutils.EmbeddedSingleNodeKafkaCluster;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class TopicProducer {
+
+  public static final long TEST_RECORD_FUTURE_TIMEOUT_MS = 5000;
+
+  private final EmbeddedSingleNodeKafkaCluster cluster;
+  private final Properties producerConfig;
+
+  public TopicProducer(EmbeddedSingleNodeKafkaCluster cluster) {
+    this.cluster = cluster;
+
+    this.producerConfig = new Properties();
+    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers());
+    producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
+    producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
+  }
+
+  /**
+   * Topic topicName will be automatically created if it doesn't exist.
+   * @param topicName the topic to produce to
+   * @param recordsToPublish the rows to publish, keyed by record key
+   * @param schema the KSQL schema used to JSON-serialize each row
+   * @return the RecordMetadata returned by the broker for each key
+   * @throws InterruptedException
+   * @throws TimeoutException
+   * @throws ExecutionException
+   */
+  public Map<String, RecordMetadata> produceInputData(String topicName, Map<String, GenericRow> recordsToPublish, Schema schema)
+      throws InterruptedException, TimeoutException, ExecutionException {
+
+    KafkaProducer<String, GenericRow> producer =
+        new KafkaProducer<>(producerConfig, new StringSerializer(), new KsqlJsonSerializer(schema));
+
+    Map<String, RecordMetadata> result = new HashMap<>();
+    for (Map.Entry<String, GenericRow> recordEntry : recordsToPublish.entrySet()) {
+      String key = recordEntry.getKey();
+      ProducerRecord<String, GenericRow> producerRecord = new ProducerRecord<>(topicName, key, recordEntry.getValue());
+      Future<RecordMetadata> recordMetadataFuture = producer.send(producerRecord);
+      result.put(key, recordMetadataFuture.get(TEST_RECORD_FUTURE_TIMEOUT_MS, TimeUnit.MILLISECONDS));
+    }
+    producer.close();
+
+    return result;
+  }
+
+  /**
+   * Produce input data to the topic named dataProvider.topicName()
+   */
+  public Map<String, RecordMetadata> produceInputData(TestDataProvider dataProvider) throws Exception {
+    return produceInputData(dataProvider.topicName(), dataProvider.data(), dataProvider.schema());
+  }
+
+}
diff --git a/ksql-core/src/test/java/io/confluent/ksql/util/json/JsonPathTokenizerTest.java b/ksql-core/src/test/java/io/confluent/ksql/util/json/JsonPathTokenizerTest.java
new file mode 100644
index 000000000000..862035aea9f7
--- /dev/null
+++ b/ksql-core/src/test/java/io/confluent/ksql/util/json/JsonPathTokenizerTest.java
@@ -0,0 +1,25 @@
+package io.confluent.ksql.util.json;
+
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+
+public class JsonPathTokenizerTest {
+
+  @Test
+  public void testJsonPathTokenizer() throws IOException {
+    JsonPathTokenizer jsonPathTokenizer = new JsonPathTokenizer("$.log.cloud.region");
+    ImmutableList<String> tokens = ImmutableList.copyOf(jsonPathTokenizer);
+    List<String> tokenList = tokens.asList();
+    Assert.assertTrue(tokenList.size() == 3);
+    Assert.assertTrue(tokenList.get(0).equalsIgnoreCase("log"));
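+    // Tokens come back in path order, so the nested fields can be checked positionally.
+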
Assert.assertTrue(tokenList.get(1).equalsIgnoreCase("cloud")); + Assert.assertTrue(tokenList.get(2).equalsIgnoreCase("region")); + + } + +} diff --git a/ksql-core/src/test/resources/TestCatalog.json b/ksql-core/src/test/resources/TestCatalog.json new file mode 100644 index 000000000000..012c1aacdc30 --- /dev/null +++ b/ksql-core/src/test/resources/TestCatalog.json @@ -0,0 +1,92 @@ +{ + "name": "ksql_catalog", + "topics" :[ + { + "namespace": "ksql-topics", + "topicname": "ORDERS_TOPIC_AVRO", + "kafkatopicname": "orders_kafka_topic_avro", + "serde": "avro", + "avroschemafile": "src/test/resources/avro_order_schema.avro" + } + , + { + "namespace": "ksql-topics", + "topicname": "PAGEVIEW_TOPIC", + "kafkatopicname": "pageview_kafka_topic_json", + "serde": "json" + } + , + { + "namespace": "ksql-topics", + "topicname": "ORDERS_TOPIC", + "kafkatopicname": "orders_kafka_topic", + "serde": "json" + } + , + { + "namespace": "ksql-topics", + "topicname": "USERS_TOPIC", + "kafkatopicname": "users_kafka_topic_json", + "serde": "json" + } + ] + +, + "schemas" :[ + { + "namespace": "ksql", + "type": "STREAM", + "name": "ORDERS", + "key": "ORDERTIME", + "timestamp": "null", + "topic": "ORDERS_TOPIC", + "fields": [ + {"name": "ORDERTIME", "type": "long"} , + {"name": "ORDERID", "type": "string"} , + {"name": "ITEMID", "type": "string"} , + {"name": "ORDERUNITS", "type": "double"} ] + } + , + { + "namespace": "ksql", + "type": "STREAM", + "name": "PAGEVIEW", + "key": "VIEWTIME", + "timestamp": "null", + "topic": "PAGEVIEW_TOPIC", + "fields": [ + {"name": "VIEWTIME", "type": "long"} , + {"name": "USERID", "type": "string"} , + {"name": "PAGEID", "type": "string"} ] + } + , + { + "namespace": "ksql", + "type": "TABLE", + "name": "USERS", + "key": "USERID", + "timestamp": "null", + "topic": "USERS_TOPIC", + "statestore": "users_statestore", + "fields": [ + {"name": "USERTIME", "type": "long"} , + {"name": "USERID", "type": "string"} , + {"name": "REGIONID", "type": "string"} , + {"name": "GENDER", "type": "string"} ] + } + , + { + "namespace": "ksql", + "type": "STREAM", + "name": "ORDERS_AVRO", + "key": "ORDERTIME", + "timestamp": "null", + "topic": "ORDERS_TOPIC_AVRO", + "fields": [ + {"name": "ORDERTIME", "type": "long"} , + {"name": "ORDERID", "type": "string"} , + {"name": "ITEMID", "type": "string"} , + {"name": "ORDERUNITS", "type": "double"} ] + } + ] +} \ No newline at end of file diff --git a/ksql-core/src/test/resources/avro_order_schema.avro b/ksql-core/src/test/resources/avro_order_schema.avro new file mode 100644 index 000000000000..babaedd4f9a9 --- /dev/null +++ b/ksql-core/src/test/resources/avro_order_schema.avro @@ -0,0 +1,11 @@ +{ + "namespace": "ksql", + "name": "orders", + "type": "record", + "fields": [ + {"name": "ORDERTIME", "type": "long"}, + {"name": "ORDERID", "type": "string"}, + {"name": "ITEMID", "type": "string"}, + {"name": "ORDERUNITS", "type": "double"} + ] +} \ No newline at end of file diff --git a/ksql-examples/Dockerfile b/ksql-examples/Dockerfile new file mode 100644 index 000000000000..cc64cb845de9 --- /dev/null +++ b/ksql-examples/Dockerfile @@ -0,0 +1,11 @@ +ARG DOCKER_REGISTRY + +FROM ${DOCKER_REGISTRY}confluentinc/cp-base + +ARG KSQL_VERSION +ARG ARTIFACT_ID + +ADD target/${ARTIFACT_ID}-${KSQL_VERSION}-standalone.jar /usr/share/java/${ARTIFACT_ID}/${ARTIFACT_ID}-${KSQL_VERSION}-standalone.jar +ADD target/${ARTIFACT_ID}-${KSQL_VERSION}-package/bin/* /usr/bin/ +ADD target/${ARTIFACT_ID}-${KSQL_VERSION}-package/etc/* /etc/ksql/ +ADD 
target/${ARTIFACT_ID}-${KSQL_VERSION}-package/share/doc/* /usr/share/doc/${ARTIFACT_ID}/
diff --git a/ksql-examples/README.md b/ksql-examples/README.md
new file mode 100644
index 000000000000..3d9c709a22b8
--- /dev/null
+++ b/ksql-examples/README.md
@@ -0,0 +1,53 @@
+# KSQL examples
+This package provides KSQL example queries along with data generator tools for them.
+After building the project you can run the examples by following these steps:
+
+1. Start Zookeeper and Kafka on the local machine with default settings
+
+2. Start the data generator for the users topic using the following command:
+
+       $ java -jar ./ksql-examples/target/ksql-examples-0.1-SNAPSHOT-standalone.jar
+       quickstart=users format=json topic=user_topic_json maxInterval=1000
+
+
+3. Start the data generator for the pageview topic using the following command:
+
+       $ java -jar ./ksql-examples/target/ksql-examples-0.1-SNAPSHOT-standalone.jar
+       quickstart=pageview format=json topic=pageview_topic_json
+
+4. Start the KSQL CLI using the following command:
+
+       $ java -jar ksql-cli/target/ksql-cli-0.1-SNAPSHOT-standalone.jar local
+
+
+You will be able to run the provided queries and see the results. The data generators
+will continuously push new messages to the topics until you terminate them.
+
+Here are the sample queries:
+
+    REGISTER TOPIC users_topic WITH (value_format = 'json', kafka_topic='user_topic_json');
+
+    REGISTER TOPIC pageview_topic WITH (value_format = 'json', kafka_topic='pageview_topic_json');
+
+    CREATE STREAM pageview (viewtime bigint, pageid varchar, userid varchar) WITH (registered_topic = 'pageview_topic');
+
+    CREATE TABLE users (registertime bigint, userid varchar, regionid varchar, gender varchar) WITH (registered_topic = 'users_topic');
+
+
+    -- Enrich the pageview stream
+    CREATE STREAM enrichedpv AS SELECT users.userid AS userid, pageid, regionid, gender FROM pageview LEFT JOIN users ON pageview.userid = users.userid;
+
+    -- Find all the pageviews by female users
+    CREATE STREAM enrichedpv_female AS SELECT users.userid AS userid, pageid, regionid, gender FROM pageview LEFT JOIN users ON pageview.userid = users.userid WHERE gender = 'FEMALE';
+
+    -- Find the female pageviews from regions whose ids end in _8 or _9
+    CREATE STREAM enrichedpv_female_r8 AS SELECT * FROM enrichedpv_female WHERE regionid LIKE '%_8' OR regionid LIKE '%_9';
+
+    -- Number of views for each page for a tumbling window of 5 seconds
+    CREATE TABLE pvcount AS SELECT pageid, count(*) from enrichedpv window tumbling (size 5 second) group by pageid;
+
+    -- Number of views for each page for a tumbling window of 5 minutes
+    CREATE TABLE pvcount_5min AS SELECT pageid, count(*) from enrichedpv window tumbling (size 5 minute) group by pageid;
+
+    -- Number of views for each region and gender combination for a tumbling window of 15 seconds,
+    -- where the view count is greater than 5
+    CREATE TABLE pvcount_gender_region AS SELECT gender, regionid , count(*) from enrichedpv window tumbling (size 15 second) group by gender, regionid having count(*) > 5;
diff --git a/ksql-examples/examples/clickstream-analysis/README.md b/ksql-examples/examples/clickstream-analysis/README.md
new file mode 100644
index 000000000000..0db57ab13fab
--- /dev/null
+++ b/ksql-examples/examples/clickstream-analysis/README.md
@@ -0,0 +1,167 @@
+# ClickStream Analysis
+
+
+
+### Prerequisites:
+- Confluent 3.3.0 installed locally (default settings, port 8083)
+- Elasticsearch installed locally (default settings, port 9200)
+- Grafana installed locally (default settings, port 3000)
+
+  **Mac: [user$ brew install elasticsearch grafana]**
+
+- KSQL is downloaded and compiled [mvn package -Dmaven.test.skip=true]
+
+_**Before you start: have Elasticsearch and Grafana running on their default ports**_
+
+1. Start Confluent Platform:
+```
+confluent-3.3.0 user$ ./bin/confluent start
+Starting zookeeper
+zookeeper is [UP]
+Starting kafka
+kafka is [UP]
+Starting schema-registry
+schema-registry is [UP]
+Starting kafka-rest
+kafka-rest is [UP]
+Starting connect
+connect is [UP]
+```
+
+2. Run Elasticsearch and Grafana-Server
+```
+user% elastic&
+user% run-grafana.sh&
+```
+
+3. Run KSQL in local mode
+```
+:ksql user$ ./bin/ksql-cli local
+                  ======================================
+                  =      _  __ _____  ____  _          =
+                  =     | |/ // ____|/ __ \| |         =
+                  =     | ' /| (___  | |  | | |        =
+                  =     |  <   \___ \| |  | | |        =
+                  =     | . \ ____)  | |__| | |____    =
+                  =     |_|\_\_____/  \___\_\______|   =
+                  =                                    =
+                  =  Streaming Query Language for Kafka =
+Copyright 2017 Confluent Inc.
+
+CLI v0.0.1, Server v0.0.1 located at http://localhost:9098
+
+Having trouble? Type 'help' (case-insensitive) for a rundown of how things work!
+
+ksql>
+```
+
+4. Use DataGen to create the ClickStream
+```
+:ksql user$ bin/ksql-datagen quickstart=clickstream format=json topic=clickstream_1 maxInterval=1000 iterations=5000
+111.168.57.122 --> ([ '111.168.57.122' | 12 | '-' | '15/Aug/2017:10:53:45 +0100' | 1502790825640 | 'GET /site/user_status.html HTTP/1.1' | '404' | '1289' | '-' | 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' ])
+111.90.225.227 --> ([ '111.90.225.227' | 5 | '-' | '15/Aug/2017:10:53:46 +0100' | 1502790826930 | 'GET /images/logo-small.png HTTP/1.1' | '302' | '4006' | '-' | 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' ])
+222.145.8.144 --> ([ '222.145.8.144' | 18 | '-' | '15/Aug/2017:10:53:47 +0100' | 1502790827645 | 'GET /site/user_status.html HTTP/1.1' | '200' | '4006' | '-' | 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' ])
+```
+5. Use DataGen to create the status codes (one-off, to populate a TABLE)
+```
+:ksql user$ bin/ksql-datagen quickstart=clickstream_codes format=json topic=clickstream_codes maxInterval=100 iterations=100
+404 --> ([ 404 | 'Page not found' ])
+405 --> ([ 405 | 'Method not allowed' ])
+```
+
+6. Use DataGen to create the set of Users (one-off, to populate a TABLE)
+```
+:ksql user$ bin/ksql-datagen quickstart=clickstream_users format=json topic=clickstream_users maxInterval=10 iterations=20
+1 --> ([ 1 | 1427769490698 | 'Abdel' | 'Adicot' | 'Frankfurt' | 'Gold' ])
+2 --> ([ 2 | 1411540260097 | 'Ferd' | 'Trice' | 'Palo Alto' | 'Platinum' ])
+3 --> ([ 3 | 1462725158453 | 'Antonio' | 'Adicot' | 'San Francisco' | 'Platinum' ])
+```
+
+
+7. Load the clickstream-schema.sql file that will run the demo app
+```
+ksql> run script 'ksql-examples/examples/clickstream-analysis/clickstream-schema.sql';
+
+ Message
+------------------------------------
+ Statement written to command topic
+ksql>
+```
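+
+Note: `run script` sends each statement in the file to the engine via the command topic,
+so the tables and streams checked in the next two steps are created asynchronously and
+can take a few seconds to appear.
+
+8.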
Check that TABLEs are created +``` +ksql> list TABLES; + + Table Name | Kafka Topic | Format | Windowed +--------------------------------------------------------------------- + ERRORS_PER_MIN_ALERT | ERRORS_PER_MIN_ALERT | JSON | true + CLICKSTREAM_CODES_TS | CLICKSTREAM_CODES_TS | JSON | false + CLICKSTREAM_CODES | clickstream_codes_1 | JSON | false + PAGES_PER_MIN | PAGES_PER_MIN | JSON | true + EVENTS_PER_MIN_MAX_AVG | EVENTS_PER_MIN_MAX_AVG | JSON | true + ERRORS_PER_MIN | ERRORS_PER_MIN | JSON | true + EVENTS_PER_MIN | EVENTS_PER_MIN | JSON | true +``` + +9. Check that STREAMs are created +``` +ksql> list STREAMS; + + Stream Name | Kafka Topic | Format +---------------------------------------------------------------- + EVENTS_PER_MIN_MAX_AVG_TS | EVENTS_PER_MIN_MAX_AVG_TS | JSON + ERRORS_PER_MIN_TS | ERRORS_PER_MIN_TS | JSON + EVENTS_PER_MIN_TS | EVENTS_PER_MIN_TS | JSON + ERRORS_PER_MIN_ALERT_TS | ERRORS_PER_MIN_ALERT_TS | JSON + PAGES_PER_MIN_TS | PAGES_PER_MIN_TS | JSON + ENRICHED_ERROR_CODES_TS | ENRICHED_ERROR_CODES_TS | JSON + CLICKSTREAM | clickstream_1 | JSON +``` + +10. Ensure that data is being streamed through various TABLEs and STREAMs +``` +ksql> select * from CLICKSTREAM; +1502152008511 | 104.152.45.45 | 1502152008511 | 07/Aug/2017:17:26:48 -0700 | 104.152.45.45 | GET /index.html HTTP/1.1 | 404 | - | Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html) +1502152008691 | 54.173.165.103 | 1502152008691 | 07/Aug/2017:17:26:48 -0700 | 54.173.165.103 | GET /index.html HTTP/1.1 | 406 | - | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 +1502152009077 | 66.249.79.93 | 1502152009077 | 07/Aug/2017:17:26:49 -0700 | 66.249.79.93 | GET /site/user_status.html HTTP/1.1 | 200 | - | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 +1502152009575 | 89.203.236.146 | 1502152009575 | 07/Aug/2017:17:26:49 -0700 | 89.203.236.146 | GET /site/user_status.html HTTP/1.1 | 302 | - | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 +1502152009679 | 172.245.174.248 | 1502152009679 | 07/Aug/2017:17:26:49 -0700 | 172.245.174.248 | GET /site/user_status.html HTTP/1.1 | 406 | - | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 +^CQuery terminated +ksql> select * from EVENTS_PER_MIN_TS; +1502152015000 | -]�<�� | 1502152015000 | - | 13 +1502152020000 | -]�<� | 1502152020000 | - | 6 +^CQuery terminated +ksql> select * from PAGES_PER_MIN; +1502152040000 | - : Window{start=1502152040000 end=9223372036854775807} | - | 6 +1502152040000 | - : Window{start=1502152040000 end=9223372036854775807} | - | 7 +1502152045000 | - : Window{start=1502152045000 end=9223372036854775807} | - | 4 +1502152045000 | - : Window{start=1502152045000 end=9223372036854775807} | - | 5 +^CQuery terminated +ksql> +``` +11. Send KSQL Tables=>Connect=>Elastic=>Grafana +``` +ksql user$ cd ksql-examples/examples/clickstream-analysis/ +user$ ./ksql-tables-to-grafana.sh +{<>} +user$ +``` + +12. Load the dashboard into Grafana +``` +user$ ./clickstream-analysis-dashboard.sh +{"slug":"click-stream-analysis","status":"success","version":5} +user$ +``` + +13. View the ClickStream Dashboard +``` +Navigate to http://localhost:3000/dashboard/db/click-stream-analysis +``` + +Interesting things to try: +* Understand how the clickstream-schema.sql file is structured. 
We use a DataGen.KafkaTopic.clickstream_1 -> Stream -> Table (for window & analytics with group-by) -> Table (to Add EVENT_TS for time-index) -> ElastiSearch/Connect topic +* try 'list topics' to see where data is persisted +* try 'list tables' +* try 'list streams' +* type: history diff --git a/ksql-examples/examples/clickstream-analysis/clickstream-analysis-dashboard.json b/ksql-examples/examples/clickstream-analysis/clickstream-analysis-dashboard.json new file mode 100644 index 000000000000..6450fdcf5624 --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/clickstream-analysis-dashboard.json @@ -0,0 +1,978 @@ +{ + "dashboard": { + "__inputs": [ + { + "name": "errors_per_min_alert_ts", + "label": "errors_per_min_alert_ts", + "description": "", + "type": "datasource", + "pluginId": "elasticsearch", + "pluginName": "Elasticsearch" + }, + { + "name": "errors_per_min_alert_ts", + "label": "errors_per_min_alert_ts", + "description": "", + "type": "datasource", + "pluginId": "elasticsearch", + "pluginName": "Elasticsearch" + }, + { + "name": "errors_per_min_alert_ts", + "label": "errors_per_min_alert_ts", + "description": "", + "type": "datasource", + "pluginId": "elasticsearch", + "pluginName": "Elasticsearch" + }, + { + "name": "events_per_min_ts", + "label": "events_per_min_ts", + "description": "", + "type": "datasource", + "pluginId": "elasticsearch", + "pluginName": "Elasticsearch" + }, + { + "name": "events_per_min_max_avg_ts", + "label": "events_per_min_max_avg_ts", + "description": "", + "type": "datasource", + "pluginId": "elasticsearch", + "pluginName": "Elasticsearch" + }, + { + "name": "pages_per_min_ts", + "label": "pages_per_min_ts", + "description": "", + "type": "datasource", + "pluginId": "elasticsearch", + "pluginName": "Elasticsearch" + } + ], + "__requires": [ + { + "type": "datasource", + "id": "elasticsearch", + "name": "Elasticsearch", + "version": "3.0.0" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.4.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + } + ], + "annotations": { + "list": [] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": "5s", + "rows": [ + { + "collapse": false, + "height": 312, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "errors_per_min_alert_ts", + "fill": 1, + "id": 7, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": true, + "min": false, + "rightSide": false, + "show": true, + "total": true, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "bucketAggs": [ + { + "fake": true, + "field": "STATUS", + "id": "4", + "settings": { + "min_doc_count": 1, + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "fake": true, + "field": "EVENT_TS", + "id": "3", + "settings": { + "interval": "auto", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "ERRORS", + "id": "1", + "meta": {}, + "settings": {}, + "type": "max" + } + ], + "refId": "A", + 
"timeField": "EVENT_TS" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "400 error status alert HAVING count(*) > 5", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "columns": [ + { + "text": "CODE", + "value": "CODE" + }, + { + "text": "DEFINITION", + "value": "DEFINITION" + }, + { + "text": "Average", + "value": "Average" + } + ], + "datasource": "enriched_error_codes_ts", + "fontSize": "100%", + "id": 9, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "span": 6, + "styles": [ + { + "alias": "Code", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "CODE", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Definition", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "DEFINITION", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Count", + "colorMode": "cell", + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Average", + "thresholds": [ + "3" + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "bucketAggs": [ + { + "fake": true, + "field": "CODE", + "id": "3", + "settings": { + "min_doc_count": 1, + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "field": "DEFINITION", + "id": "2", + "settings": { + "min_doc_count": 1, + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "COUNT", + "id": "1", + "meta": {}, + "settings": {}, + "type": "avg" + } + ], + "refId": "A", + "timeField": "EVENT_TS" + } + ], + "title": "TABLE-STREAM JOIN between ClickStream and Codes Table", + "transform": "json", + "type": "table" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 299, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "click_user_sessions_ts", + "fill": 1, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + 
"seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "bucketAggs": [ + { + "fake": true, + "field": "IP", + "id": "3", + "settings": { + "min_doc_count": 1, + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "field": "EVENT_TS", + "id": "2", + "settings": { + "interval": "10s", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "select field", + "id": "1", + "type": "count" + } + ], + "refId": "A", + "timeField": "EVENT_TS" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "User Sessionisation by IP", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 231, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "events_per_min_ts", + "fill": 1, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "show": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "bucketAggs": [ + { + "field": "EVENT_TS", + "id": "2", + "settings": { + "interval": "10s", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "EVENTS", + "id": "1", + "meta": {}, + "settings": {}, + "type": "max" + } + ], + "refId": "A", + "timeField": "EVENT_TS" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Count all Events grouped by time bucket", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "events_per_min_max_avg_ts", + "fill": 1, + "id": 1, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 3, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "bucketAggs": [ + { + 
"field": "EVENT_TS", + "id": "2", + "settings": { + "interval": "10s", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "MAX", + "id": "1", + "meta": {}, + "settings": {}, + "type": "max" + } + ], + "refId": "A", + "timeField": "EVENT_TS" + }, + { + "bucketAggs": [ + { + "field": "EVENT_TS", + "id": "2", + "settings": { + "interval": "10s", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "MAX", + "id": "1", + "meta": {}, + "settings": {}, + "type": "min" + } + ], + "refId": "B", + "timeField": "EVENT_TS" + }, + { + "bucketAggs": [ + { + "field": "EVENT_TS", + "id": "2", + "settings": { + "interval": "10s", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "AVG", + "id": "1", + "meta": {}, + "settings": {}, + "type": "avg" + } + ], + "refId": "C", + "timeField": "EVENT_TS" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Min Max Avg events using UDFs", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "enriched_error_codes_ts", + "fill": 1, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "bucketAggs": [ + { + "fake": true, + "field": "DEFINITION", + "id": "3", + "settings": { + "min_doc_count": 1, + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "field": "EVENT_TS", + "id": "2", + "settings": { + "interval": "1m", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "STATUS", + "id": "1", + "meta": {}, + "settings": {}, + "type": "max" + } + ], + "refId": "A", + "timeField": "EVENT_TS" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Status codes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"pages_per_min_ts", + "fill": 1, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "bucketAggs": [ + { + "field": "EVENT_TS", + "id": "2", + "settings": { + "interval": "10s", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "PAGES", + "id": "1", + "meta": {}, + "settings": {}, + "type": "avg" + } + ], + "refId": "A", + "timeField": "EVENT_TS" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Page Views using LIKE filtering", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Click Stream Analysis", + "version": 26 + }, + "overwrite": false +} \ No newline at end of file diff --git a/ksql-examples/examples/clickstream-analysis/clickstream-analysis-dashboard.sh b/ksql-examples/examples/clickstream-analysis/clickstream-analysis-dashboard.sh new file mode 100755 index 000000000000..5a5305383921 --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/clickstream-analysis-dashboard.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +#curl -X "GET" "http://localhost:3000/api/dashboards/db/click-stream-analysis" \ +# -H "Content-Type: application/json" \ +# --user admin:admin + + +echo "Loading Grafana ClickStream Dashboard" + +curl -X "POST" "http://localhost:3000/api/dashboards/db" \ + -H "Content-Type: application/json" \ + --user admin:admin \ + --data-binary @clickstream-analysis-dashboard.json + diff --git a/ksql-examples/examples/clickstream-analysis/clickstream-schema.sql b/ksql-examples/examples/clickstream-analysis/clickstream-schema.sql new file mode 100644 index 000000000000..835066a934e1 --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/clickstream-schema.sql @@ -0,0 +1,117 @@ +-- lets the windows accumulate more data +set 'commit.interval.ms'='2000'; +set 'cache.max.bytes.buffering'='10000000'; + + +-- 1. SOURCE of ClickStream +DROP STREAM clickstream; +CREATE STREAM clickstream (_time bigint,time varchar, ip varchar, request varchar, status int, userid int, bytes bigint, agent varchar) with (kafka_topic = 'clickstream_1', value_format = 'json'); + + +-- 2. 
Derive raw EVENTS_PER_MIN
+
+ -- number of events per minute - think about the key used for distribution (shuffling) - we shouldn't use 'userid'
+DROP TABLE events_per_min;
+create table events_per_min as select userid, count(*) as events from clickstream window TUMBLING (size 10 second) group by userid;
+
+-- VIEW - Enrich with rowTime
+DROP TABLE events_per_min_ts;
+CREATE TABLE events_per_min_ts as select rowTime as event_ts, * from events_per_min;
+
+-- VIEW
+DROP TABLE events_per_min_max_avg;
+DROP TABLE events_per_min_max_avg_ts;
+create table events_per_min_max_avg as select userid, min(events) as min, max(events) as max, sum(events)/count(events) as avg from events_per_min WINDOW TUMBLING (size 10 second) group by userid;
+create table events_per_min_max_avg_ts as select rowTime as event_ts, * from events_per_min_max_avg;
+
+
+-- 3. BUILD STATUS_CODES
+-- static table
+DROP TABLE clickstream_codes;
+CREATE TABLE clickstream_codes (code int, definition varchar) with (key='code', kafka_topic = 'clickstream_codes_1', value_format = 'json');
+
+-- need to add _TS for ElasticSearch
+DROP TABLE clickstream_codes_ts;
+create table clickstream_codes_ts as select rowTime as event_ts, * from clickstream_codes;
+
+
+-- 4. BUILD PAGE_VIEWS
+DROP TABLE pages_per_min;
+create table pages_per_min as select userid, count(*) as pages from clickstream WINDOW HOPPING (size 10 second, advance by 5 second) WHERE request like '%html%' group by userid;
+
+ -- VIEW - add timestamp
+DROP TABLE pages_per_min_ts;
+CREATE TABLE pages_per_min_ts as select rowTime as event_ts, * from pages_per_min;
+
+ -- 5. URL STATUS CODES (Join AND Alert)
+
+-- Use a 'HAVING' filter to show ERROR codes > 400 where count > 5
+DROP TABLE ERRORS_PER_MIN_ALERT;
+create TABLE ERRORS_PER_MIN_ALERT as select status, count(*) as errors from clickstream window HOPPING ( size 30 second, advance by 20 second) WHERE status > 400 group by status HAVING count(*) > 5 AND count(*) is not NULL;
+DROP TABLE ERRORS_PER_MIN_ALERT_TS;
+CREATE TABLE ERRORS_PER_MIN_ALERT_TS as select rowTime as event_ts, * from ERRORS_PER_MIN_ALERT;
+
+
+DROP TABLE ERRORS_PER_MIN;
+create table ERRORS_PER_MIN as select status, count(*) as errors from clickstream window HOPPING ( size 10 second, advance by 5 second) WHERE status > 400 group by status;
+
+
+-- VIEW - Enrich with timestamp
+DROP TABLE ERRORS_PER_MIN_TS;
+CREATE TABLE ERRORS_PER_MIN_TS as select rowTime as event_ts, * from ERRORS_PER_MIN;
+
+-- VIEW - Enrich error codes with a join to the status-code definition table
+DROP STREAM ENRICHED_ERROR_CODES;
+DROP TABLE ENRICHED_ERROR_CODES_COUNT;
+DROP STREAM ENRICHED_ERROR_CODES_TS;
+
+-- Join using a STREAM
+CREATE STREAM ENRICHED_ERROR_CODES AS SELECT code, definition FROM clickstream LEFT JOIN clickstream_codes ON clickstream.status = clickstream_codes.code;
+-- Aggregate (count & group by) using a TABLE window
+CREATE TABLE ENRICHED_ERROR_CODES_COUNT AS SELECT code, definition, COUNT(*) AS count FROM ENRICHED_ERROR_CODES WINDOW TUMBLING (size 30 second) GROUP BY code, definition HAVING COUNT(*) > 1;
+-- Enrich with the rowTime timestamp to support time-series search
+CREATE TABLE ENRICHED_ERROR_CODES_TS AS SELECT rowTime as EVENT_TS, * FROM ENRICHED_ERROR_CODES_COUNT;
+
+
+-- 6. Sessionisation using IP addresses - 300 seconds of inactivity expires the session
+DROP TABLE CLICK_USER_SESSIONS;
+DROP TABLE CLICK_USER_SESSIONS_TS;
+create TABLE CLICK_USER_SESSIONS as SELECT ip, count(*) as events FROM clickstream window SESSION (300 second) GROUP BY ip;
+create TABLE CLICK_USER_SESSIONS_TS as SELECT rowTime as event_ts, * from CLICK_USER_SESSIONS;
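+
+-- Note on the *_TS tables above: each aggregate is wrapped in a second table that
+-- projects rowTime as EVENT_TS, giving the Elasticsearch sink an explicit time field
+-- to index on; SESSION (300 second) closes a per-IP session after 300s of inactivity.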
+
+
+-- Demo Blog Article tracking user-session-kbytes
+
+--DROP TABLE PER_USER_KBYTES;
+--create TABLE PER_USER_KBYTES as SELECT ip, sum(bytes)/1024 as kbytes FROM clickstream window SESSION (300 second) GROUP BY ip;
+
+--DROP TABLE PER_USER_KBYTES_TS;
+--CREATE TABLE PER_USER_KBYTES_TS as select rowTime as event_ts, kbytes, ip from PER_USER_KBYTES WHERE ip IS NOT NULL;
+
+--DROP TABLE PER_USER_KBYTES_ALERT;
+--create TABLE PER_USER_KBYTES_ALERT as SELECT ip, sum(bytes)/1024 as kbytes FROM clickstream window SESSION (300 second) GROUP BY ip HAVING sum(bytes)/1024 > 5;
+
+
+-- Clickstream users for enrichment and exception monitoring
+
+-- users lookup table
+DROP TABLE web_users;
+CREATE TABLE web_users (user_id int, registered_At long, first_name varchar, last_name varchar, city varchar, level varchar) with (key='user_id', kafka_topic = 'clickstream_users', value_format = 'json');
+
+-- Clickstream enriched with user account data
+--DROP STREAM customer_clickstream;
+--CREATE STREAM customer_clickstream WITH (PARTITIONS=2) as SELECT userid, u.first_name, u.last_name, u.level, time, ip, request, status, agent FROM clickstream c LEFT JOIN web_users u ON c.userid = u.user_id;
+
+-- Find error views by important users
+--DROP STREAM platinum_customers_with_errors;
+--create stream platinum_customers_with_errors WITH (PARTITIONS=2) as SELECT * FROM customer_clickstream WHERE status > 400 AND level = 'Platinum';
+
+-- Find error views by important users in one shot
+DROP STREAM platinum_errors;
+CREATE STREAM platinum_errors WITH (PARTITIONS=2) as SELECT userid, u.first_name, u.last_name, u.city, u.level, time, ip, request, status, agent FROM clickstream c LEFT JOIN web_users u ON c.userid = u.user_id WHERE status > 400 AND level = 'Platinum';
+
+-- Trend of errors from important users
+DROP TABLE platinum_errors_per_5_min;
+CREATE TABLE platinum_errors_per_5_min AS SELECT userid, first_name, last_name, city, count(*) as running_count FROM platinum_errors WINDOW TUMBLING (SIZE 5 MINUTE) WHERE request LIKE '%html%' GROUP BY userid, first_name, last_name, city;
+
+
diff --git a/ksql-examples/examples/clickstream-analysis/connect-config/README.md b/ksql-examples/examples/clickstream-analysis/connect-config/README.md
new file mode 100644
index 000000000000..0f0f8dc2760a
--- /dev/null
+++ b/ksql-examples/examples/clickstream-analysis/connect-config/README.md
@@ -0,0 +1,25 @@
+Place the jar in: confluent-3.3.0/share/java/kafka-connect-elasticsearch/
+
+Then configure the connector properties when adding it via REST:
+
+```
+curl -X "POST" "http://localhost:8083/connectors/" \
+     -H "Content-Type: application/json" \
+     -d $'{
+  "name": "es_sink_'$TABLE_NAME'",
+  "config": {
+    "schema.ignore": "true",
+    "topics": "'$TABLE_NAME'",
+    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "value.converter.schemas.enable": false,
+    "connector.class": "io.confluent.connect.elasticsearch.ElasticsearchSinkConnector",
+    "key.ignore": "true",
+    "value.converter": "org.apache.kafka.connect.json.JsonConverter",
+    "type.name": "type.name=kafkaconnect",
+    "topic.index.map": "'$TABLE_NAME':'$table_name'",
+    "connection.url": "http://localhost:9200",
+    "transforms": "FilterNulls",
+    "transforms.FilterNulls.type": "io.confluent.transforms.NullFilter"
+  }
+}'
+```
diff --git a/ksql-examples/examples/clickstream-analysis/connect-config/connect.properties
b/ksql-examples/examples/clickstream-analysis/connect-config/connect.properties new file mode 100644 index 000000000000..24bd2c886ae3 --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/connect-config/connect.properties @@ -0,0 +1,2 @@ +transforms=FilterNulls +transforms.FilterNulls.type=io.confluent.transforms.NullFilter diff --git a/ksql-examples/examples/clickstream-analysis/connect-config/null-filter-4.0.0-SNAPSHOT.jar b/ksql-examples/examples/clickstream-analysis/connect-config/null-filter-4.0.0-SNAPSHOT.jar new file mode 100644 index 0000000000000000000000000000000000000000..1179907c273b70f06138eb303c136fa03844ae04 GIT binary patch literal 3119 zcmb7G3pkT~8=pg(P`u6w4M}7Sdp}V*#Krk<&fksqQqF%I!PKKN?G+b zEadd&u+SSX$-;;^06VCN@+mMFPCK;rm zeq%kSXovALQ!bC7-%!7%(^Y%ofWgM;&vn6zFGLtNV*TI6{U&$VT+qE3z{Lug>pIE&S*)Ddd z+3Ic>Iqf2EFpke0Vh>ESMkQ~w5=v1nUBewy=(;KA+uQUKZ7-8f8CM;7aocRFZkNQ? z1Z?YOZ6@;~{~oDHu{tI9YW(vAifoeK^p=6J*<(4wm*V*9v@_Iw5Q;;{M=NE;2$*;F zh*VGSmDdubc_9dT9y-ur`b09CbNKQuX_J;zk(3|Q4M*PZF5lq;;z zIpzFXe3*4kp7yEq7Wo4Y5pq^{lw1g3mr^2kBl;?W-fzG1x;)ZxY(-SYco(E9)q`tV z_gsypQ_5G);N?5_xpzxL!?BAYMfiJwZql$;vr>bl$Er(Lpm~>@? z|7kt`VTXg%og@=qbxdZ`h=X|PvVcTtkxEP^Oz(7t2KQCu<#**VwC1bQFCU>8Bls_&is zaM1q;X8>XrZ1YVdR)8AAgP&m|ggb);x0%r|WWnPCp&@|*1@4`=7Gv3yqxzE?Zr)htokFc|aZ=P(&L(fjpd>ML&*1i~&fh3l7d2NzN7HO*sS?OHA%e&e)b+atw?UIx+ zxe8i-1y9Tx3IGu6eyWhBfp^qn+3xX!^m$Z!fYqBs9*}(BjIv8U zg}n<6)vWNuK8_@bawDt|bkf4SsBl?_B8$Y0k+*Q^hkjlCv_Mius?G6zThy6Ei1tnjem)jdmH` zK2J32L3iiE)6%=s4UmhlX9 z$%J+*wSpONS-`;>CR|kp<#WY}A>ZCiuY@rBw@AD(U*gc+i*w?dq${yHmE*6G4-`0a zb1>+^!>rSg=R<>eA7lz!Ycf0WgtI=~Xuh|JC4>F36TUQUoC9j16|*P(b34rO^s^eP1VocwMaO z!8821C$-`JI~x$8cdterP7HiV^4kG6(?}~nt+hs*z1)1fC?k5kQ=VpA?clxq+){C6 z?Tx|pT&u1e=W43tE6I8?o{^C>9-=?H6pMGa7`}N)6UPWcxpe$?h+k*u)rdQEcz zo?0o=F%cT}l%3tmYTI*kEdRV$S5N2bw=XU4Pd`dk<4SG$d%xDs9DgN;5%sa)`%0GW z{g8Z(n!Zo=W;0R%_)i`0crqaX|7z(u@c)?hfB!{1;R!fAh7jb3A|_|;&jXx3GGw2cv^IzghsE<53A$L^4uyE`eA!0IKNt&EFFY85RSj+rb1Xs7$r z*=G+}I13dcQ!}UFZtIV6c3TY6+}=)_^aP#c`0(7OYD~yFd$lyBb|}$^Z=Uleax3?| zmo>dREe7!?Gf@RKtfVYCBAVBz9{dp)LVCD=6T`jU8^cy(? zWSQ?-m>+=a`HL~4+!x8RP~I#y2Y_4V45$qvE;Pw6O!M=`0Ir!2MtvXr4Sy_F#(Z-? 
z8S(!k)3;1CKQe%egi!@>(HCPxHU2`@|1#6UGbntl0bqVHMwABlyUBgiyl}}t@_CrL WB9{o*U!Va5kEY;Q&I&StK>q<58B&b^ literal 0 HcmV?d00001 diff --git a/ksql-examples/examples/clickstream-analysis/elastic-dynamic-template.sh b/ksql-examples/examples/clickstream-analysis/elastic-dynamic-template.sh new file mode 100755 index 000000000000..1589e111327a --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/elastic-dynamic-template.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +echo "Loading Elastic Dynamic Template to ensure _TS fields are used for TimeStamp" + +curl -XPUT "http://localhost:9200/_template/kafkaconnect/" -H 'Content-Type: application/json' -d' +{ + "template": "*", + "settings": { + "number_of_shards": 1, + "number_of_replicas": 0 + }, + "mappings": { + "_default_": { + "dynamic_templates": [ +{ + "dates": { + "match": "*_TS", + "mapping": { + "type": "date" + } + } + }, + { + "non_analysed_string_template": { + "match": "*", + "match_mapping_type": "string", + "mapping": { + "type": "string", + "index": "not_analyzed" + } + } + } + ] + } + } +}' \ No newline at end of file diff --git a/ksql-examples/examples/clickstream-analysis/ksql-connect-es-grafana.sh b/ksql-examples/examples/clickstream-analysis/ksql-connect-es-grafana.sh new file mode 100755 index 000000000000..27fb370e0ae3 --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/ksql-connect-es-grafana.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +## An "all-in-once script" to load up a new table and connect all of the relevant parts to allow data to pipe through from KSQL.KafkaTopic->Connect->Elastic->Grafana[DataSource] +## Accepts a KSQL TABLE_NAME where the data is to be sourced from. + + +if [ "$#" -ne 1 ]; then + echo "Usage: ksql-connect-es-grafana.sh " + exit; +fi + +table_name=$1 +TABLE_NAME=`echo $1 | tr '[a-z]' '[A-Z]'` + + +echo "Connecting:" $table_name + +## Load the _TS dynamic template into ELASTIC +./elastic-dynamic-template.sh + +# Tell Kafka to send this Table-Topic to Elastic +echo "Adding Elastic Source\n\n" + + +curl -X "POST" "http://localhost:8083/connectors/" \ + -H "Content-Type: application/json" \ + -d $'{ + "name": "es_sink_'$TABLE_NAME'", + "config": { + "schema.ignore": "true", + "topics": "'$TABLE_NAME'", + "key.converter": "org.apache.kafka.connect.storage.StringConverter", + "value.converter.schemas.enable": false, + "connector.class": "io.confluent.connect.elasticsearch.ElasticsearchSinkConnector", + "key.ignore": "true", + "value.converter": "org.apache.kafka.connect.json.JsonConverter", + "type.name": "type.name=kafkaconnect", + "topic.index.map": "'$TABLE_NAME':'$table_name'", + "connection.url": "http://localhost:9200", + "transforms": "FilterNulls", + "transforms.FilterNulls.type": "io.confluent.transforms.NullFilter" + } +}' + + +echo "" +echo "Adding Grafana Source" + +## Add the Elastic DataSource into Grafana +curl -X "POST" "http://localhost:3000/api/datasources" \ + -H "Content-Type: application/json" \ + --user admin:admin \ + -d $'{"id":1,"orgId":1,"name":"'$table_name'","type":"elasticsearch","typeLogoUrl":"public/app/plugins/datasource/elasticsearch/img/elasticsearch.svg","access":"proxy","url":"http://localhost:9200","password":"","user":"","database":"'$table_name'","basicAuth":false,"isDefault":false,"jsonData":{"timeField":"EVENT_TS"}}' + + +echo "" +echo "Navigate to Grafana and create a Dashboard Panel: http://localhost:3000/dashboard/new" +echo "Add the datasource: " $1 + + + + diff --git a/ksql-examples/examples/clickstream-analysis/ksql-tables-to-grafana.sh 
b/ksql-examples/examples/clickstream-analysis/ksql-tables-to-grafana.sh new file mode 100755 index 000000000000..cae8f2b42630 --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/ksql-tables-to-grafana.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + + +echo "Loading Clickstream-Demo TABLES to Confluent-Connect => Elastic => Grafana datasource" +./elastic-dynamic-template.sh + +declare -a tables=('click_user_sessions_ts' 'clickstream_status_codes_ts' 'enriched_error_codes_ts' 'errors_per_min_alert_ts' 'errors_per_min_ts' 'events_per_min_max_avg_ts' 'events_per_min_ts' 'pages_per_min_ts'); +for i in "${tables[@]}" +do + + table_name=$i + TABLE_NAME=`echo $table_name | tr '[a-z]' '[A-Z]'` + + ## Cleanup existing data + + # Elastic + curl -X "DELETE" "http://localhost:9200/""$table_name" + + # Connect + curl -X "DELETE" "http://localhost:8083/connectors/es_sink_""$TABLE_NAME" + + # Grafana + curl -X "DELETE" "http://localhost:3000/api/datasources/name/""$table_name" --user admin:admin + + # Wire in the new connection path + echo "\n\nConnecting KSQL->Elastic->Grafana " "$table_name" + ./ksql-connect-es-grafana.sh "$table_name" +done + +echo "Navigate to http://localhost:3000/dashboard/db/click-stream-analysis" + + +# ======================== +# REST API Notes +# ======================== +# +# Extract datasources from grafana +# curl -s "http://localhost:3000/api/datasources" -u admin:admin|jq -c -M '.[]' +# +# Delete a Grafana DataSource +# curl -X "DELETE" "http://localhost:3000/api/datasources/name/ + +# List confluent connectors +# curl -X "GET" "http://localhost:8083/connectors" +# +# Delete a Confluent-Connector +# curl -X "DELETE" "http://localhost:8083/connectors/es_sink_PER_USER_KBYTES_TS" +# +# Delete an Elastic Index +# curl -X "DELETE" "http://localhost:9200/per_user_kbytes_ts" +# diff --git a/ksql-examples/examples/clickstream-analysis/malicious-users-dashboard.json b/ksql-examples/examples/clickstream-analysis/malicious-users-dashboard.json new file mode 100644 index 000000000000..a0dac5f57702 --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/malicious-users-dashboard.json @@ -0,0 +1,203 @@ +{ + "dashboard": { + "__inputs": [ + { + "name": "malicious_user_sessions_ts", + "label": "malicious_user_sessions_ts", + "description": "", + "type": "datasource", + "pluginId": "elasticsearch", + "pluginName": "Elasticsearch" + } + ], + "__requires": [ + { + "type": "datasource", + "id": "elasticsearch", + "name": "Elasticsearch", + "version": "3.0.0" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.4.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + } + ], + "annotations": { + "list": [] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "malicious_user_sessions_ts", + "fill": 1, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "bucketAggs": [ + { + "fake": true, + "field": "IP", + "id": "4", 
+ "settings": { + "min_doc_count": 1, + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "field": "EVENT_TS", + "id": "2", + "settings": { + "interval": "10s", + "min_doc_count": 0, + "trimEdges": 0 + }, + "type": "date_histogram" + } + ], + "dsType": "elasticsearch", + "metrics": [ + { + "field": "KBYTES", + "id": "1", + "meta": {}, + "settings": {}, + "type": "max" + } + ], + "refId": "A", + "timeField": "EVENT_TS" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Panel Title", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Malicious User Sessions", + "version": 1 +}, +"overwrite": false +} + diff --git a/ksql-examples/examples/clickstream-analysis/malicious-users-dashboard.sh b/ksql-examples/examples/clickstream-analysis/malicious-users-dashboard.sh new file mode 100755 index 000000000000..211becd7b406 --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/malicious-users-dashboard.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +#curl -X "GET" "http://localhost:3000/api/dashboards/db/click-stream-analysis" \ +# -H "Content-Type: application/json" \ +# --user admin:admin + + +echo "Loading Grafana ClickStream Dashboard" + +curl -X "POST" "http://localhost:3000/api/dashboards/db" \ + -H "Content-Type: application/json" \ + --user admin:admin \ + --data-binary @malicious-users-dashboard.json + diff --git a/ksql-examples/examples/clickstream-analysis/run-grafana.sh b/ksql-examples/examples/clickstream-analysis/run-grafana.sh new file mode 100755 index 000000000000..a2df1e43bc2a --- /dev/null +++ b/ksql-examples/examples/clickstream-analysis/run-grafana.sh @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +grafana-server --config=/usr/local/etc/grafana/grafana.ini --homepath /usr/local/share/grafana cfg:default.paths.logs=/usr/local/var/log/grafana cfg:default.paths.data=/usr/local/var/lib/grafana cfg:default.paths.plugins=/usr/local/var/lib/grafana/plugins \ No newline at end of file diff --git a/ksql-examples/pom.xml b/ksql-examples/pom.xml new file mode 100644 index 000000000000..1002021b0fd6 --- /dev/null +++ b/ksql-examples/pom.xml @@ -0,0 +1,149 @@ + + + 4.0.0 + + + io.confluent.ksql + ksql-parent + 0.1-SNAPSHOT + + + ksql-examples + + + ${project.parent.basedir} + io.confluent.ksql.datagen.DataGen + false + ${main-class} + + + + + io.confluent.ksql + ksql-core + + + + io.confluent + kafka-connect-avro-converter + + + + com.github.mifmif + generex + ${generext.version} + + + + + junit + junit + + + junit + junit + compile + + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + 
src/assembly/development.xml + src/assembly/package.xml + src/assembly/standalone.xml + + + + ${main-class} + + + false + + + + make-assembly + package + + single + + + + + + + org.codehaus.mojo + exec-maven-plugin + ${exec-maven-plugin.version} + + + create-licenses + + io.confluent.licenses.LicenseFinder + + + -i ${project.build.directory}/${project.build.finalName}-package/share/java/${artifactId} + -f + -h ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/licenses.html + -l ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/licenses + -n ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/notices + -x licenses-${project.version}.jar + + + package + + java + + + + + true + true + + io.confluent + licenses + + + + + io.confluent + licenses + ${licenses.version} + + + + + + com.spotify + dockerfile-maven-plugin + ${dockerfile-maven-plugin.version} + + + default + + build + + + + ${project.artifactId} + ${project.version} + ${docker.registry} + + ${docker.tag} + ${docker.registry}confluentinc/${project.artifactId} + + + + + + + + diff --git a/ksql-examples/src/assembly/development.xml b/ksql-examples/src/assembly/development.xml new file mode 100644 index 000000000000..33c75594923e --- /dev/null +++ b/ksql-examples/src/assembly/development.xml @@ -0,0 +1,51 @@ + + + development + + dir + + false + + + ${project.parent.basedir} + share/doc/ksql-examples/ + + README* + COPYRIGHT* + + + + ${project.parent.basedir} + + + bin/* + + + + ${project.parent.basedir}/config + etc/ksql-examples + + * + + + + + + share/java/ksql-examples + true + + true + + org.slf4j:slf4j-log4j12 + + + + diff --git a/ksql-examples/src/assembly/package.xml b/ksql-examples/src/assembly/package.xml new file mode 100644 index 000000000000..f953402c88f6 --- /dev/null +++ b/ksql-examples/src/assembly/package.xml @@ -0,0 +1,52 @@ + + + package + + dir + + false + + + ${project.parent.basedir} + share/doc/ksql-examples/ + + version.txt + COPYRIGHT* + + + + ${project.parent.basedir} + + + bin/* + + + + ${project.parent.basedir}/config + etc/ksql-examples + + * + + + + + + share/java/ksql-examples + true + + true + + com.google.guava:guava + + + + diff --git a/ksql-examples/src/assembly/standalone.xml b/ksql-examples/src/assembly/standalone.xml new file mode 100644 index 000000000000..73c3306a30ef --- /dev/null +++ b/ksql-examples/src/assembly/standalone.xml @@ -0,0 +1,30 @@ + + + standalone + + jar + + false + + + ${project.parent.basedir} + / + + README* + COPYRIGHT* + + + + + + / + true + true + runtime + + + diff --git a/ksql-examples/src/main/java/io/confluent/avro/random/generator/Generator.java b/ksql-examples/src/main/java/io/confluent/avro/random/generator/Generator.java new file mode 100644 index 000000000000..7517be3435b9 --- /dev/null +++ b/ksql-examples/src/main/java/io/confluent/avro/random/generator/Generator.java @@ -0,0 +1,1379 @@ +package io.confluent.avro.random.generator; + +import com.mifmif.common.regex.Generex; +import org.apache.avro.Schema; + +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericEnumSymbol; +import org.apache.avro.generic.GenericFixed; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.generic.GenericRecordBuilder; + +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.Decoder; +import org.apache.avro.io.DecoderFactory; + +import 
java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; + +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; + +/** + * Generates Java objects according to an {@link Schema Avro Schema}. + */ +public class Generator { + + private static final Schema.Parser schemaParser = new Schema.Parser(); + private static final Map generexCache = new HashMap<>(); + private static final Map> optionsCache = new HashMap<>(); + private static final Map> iteratorCache = new HashMap<>(); + + /** + * The name to use for the top-level JSON property when specifying ARG-specific attributes. + */ + public static final String ARG_PROPERTIES_PROP = "arg.properties"; + + /** + * The name of the attribute for specifying length for supported schemas. Can be given as either + * an integral number or an object with at least one of {@link #LENGTH_PROP_MIN} or + * {@link #LENGTH_PROP_MAX} specified. + */ + public static final String LENGTH_PROP = "length"; + /** + * The name of the attribute for specifying the minimum length a generated value should have. + * Must be given as an integral number greater than or equal to zero. + */ + public static final String LENGTH_PROP_MIN = "min"; + /** + * The name of the attribute for specifying the maximum length a generated value should have. + * Must be given as an integral number strictly greater than the value given for + * {@link #LENGTH_PROP_MIN}, or strictly greater than zero if none is specified. + */ + public static final String LENGTH_PROP_MAX = "max"; + + /** + * The name of the attribute for specifying a regex that generated values should adhere to. Can + * be used in conjunction with {@link #LENGTH_PROP}. Must be given as a string. + */ + public static final String REGEX_PROP = "regex"; + + /** + * The name of the attribute for specifying specific values which should be randomly chosen from + * when generating values for the schema. Can be given as either an array of values or an object + * with both {@link #OPTIONS_PROP_FILE} and {@link #OPTIONS_PROP_ENCODING} + * specified. + */ + public static final String OPTIONS_PROP = "options"; + /** + * The name of a file from which to read specific values to generate for the given schema. Must + * be given as a string. + */ + public static final String OPTIONS_PROP_FILE = "file"; + /** + * The encoding of the options file; currently only "binary" and "json" are supported. Must be + * given as a string. + */ + public static final String OPTIONS_PROP_ENCODING = "encoding"; + + /** + * The name of the attribute for specifying special properties for keys in map schemas. Since + * all Avro maps have keys of type string, no schema is supplied to specify key attributes; this + * special keys attribute takes its place. Must be given as an object. + */ + public static final String KEYS_PROP = "keys"; + + /** + * The name of the attribute for specifying a possible range of values for numeric types. Must be + * given as an object. + */ + public static final String RANGE_PROP = "range"; + /** + * The name of the attribute for specifying the (inclusive) minimum value in a range. 
Must be + * given as a numeric type that is integral if the given schema is as well. + */ + public static final String RANGE_PROP_MIN = "min"; + /** + * The name of the attribute for specifying the (exclusive) maximum value in a range. Must be + * given as a numeric type that is integral if the given schema is as well. + */ + public static final String RANGE_PROP_MAX = "max"; + + /** + * The name of the attribute for specifying the likelihood that the value true is generated for a + * boolean schema. Must be given as a floating type in the range [0.0, 1.0]. + */ + public static final String ODDS_PROP = "odds"; + + /** + * The name of the attribute for specifying iterative behavior for generated values. Must be + * given as an object with at least the {@link #ITERATION_PROP_START} property specified. The + * first generated value for the schema will then be equal to the value given for + * {@link #ITERATION_PROP_START}, and successive values will increment by the value given for + * {@link #ITERATION_PROP_STEP} (or its default, if no value is given), wrapping around at the + * value given for {@link #ITERATION_PROP_RESTART} (or its default, if no value is given). + */ + public static final String ITERATION_PROP = "iteration"; + /** + * The name of the attribute for specifying the first value in a schema with iterative + * generation. Must be given as a numeric type that is integral if the given schema is as well. + */ + public static final String ITERATION_PROP_START = "start"; + /** + * The name of the attribute for specifying the wraparound value in a schema with iterative + * generation. If given, must be a numeric type that is integral if the given schema is as well. + * If not given, defaults to the maximum possible value for the schema type if the value for + * {@link #ITERATION_PROP_STEP} is positive, or the minimum possible value for the schema type if + * the value for {@link #ITERATION_PROP_STEP} is negative. + */ + public static final String ITERATION_PROP_RESTART = "restart"; + /** + * The name of the attribute for specifying the increment value in a schema with iterative + * generation. If given, must be a numeric type that is integral if the given schema is as well. + * If not given, defaults to 1 if the value for {@link #ITERATION_PROP_RESTART} is greater than + * the value for {@link #ITERATION_PROP_START}, and -1 if the value for + * {@link #ITERATION_PROP_RESTART} is less than the value for {@link #ITERATION_PROP_START}. + */ + public static final String ITERATION_PROP_STEP = "step"; + + private final Schema topLevelSchema; + private final Random random; + + /** + * Creates a generator out of an already-parsed {@link Schema}. + * @param topLevelSchema The schema to generate values for. + * @param random The object to use for generating randomness when producing values. + */ + public Generator(Schema topLevelSchema, Random random) { + this.topLevelSchema = topLevelSchema; + this.random = random; + } + + /** + * Creates a generator out of the yet-to-be-parsed Schema string. + * @param schemaString An Avro Schema represented as a string. + * @param random The object to use for generating randomness when producing values. + */ + public Generator(String schemaString, Random random) { + this(schemaParser.parse(schemaString), random); + } + + /** + * Reads in a schema, parses it, and creates a generator for it. + * @param schemaStream The stream that the schema is read from. + * @param random The object to use for generating randomness when producing values. 
+ * @throws IOException if an error occurs while reading from the input stream. + */ + public Generator(InputStream schemaStream, Random random) throws IOException { + this(schemaParser.parse(schemaStream), random); + } + + /** + * Reads in a schema, parses it, and creates a generator for it. + * @param schemaFile The file that contains the schema to generate values for. + * @param random The object to use for generating randomness when producing values. + * @throws IOException if an error occurs while reading from the schema file. + */ + public Generator(File schemaFile, Random random) throws IOException { + this(schemaParser.parse(schemaFile), random); + } + + /** + * @return The schema that the generator produces values for. + */ + public Schema schema() { + return topLevelSchema; + } + + /** + * Generate an object that matches the given schema and its specified properties. + * @return An object whose type corresponds to the top-level schema as follows: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
+ * <table>
+ *   <tr><th>Schema Type</th><th>Java Class</th></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#ARRAY ARRAY}</td><td>{@link Collection}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#BOOLEAN BOOLEAN}</td><td>{@link Boolean}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#BYTES BYTES}</td><td>{@link ByteBuffer}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#DOUBLE DOUBLE}</td><td>{@link Double}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#ENUM ENUM}</td><td>{@link GenericEnumSymbol}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#FIXED FIXED}</td><td>{@link GenericFixed}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#FLOAT FLOAT}</td><td>{@link Float}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#INT INT}</td><td>{@link Integer}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#LONG LONG}</td><td>{@link Long}</td></tr>
+ *   <tr>
+ *     <td>{@link org.apache.avro.Schema.Type#MAP MAP}</td>
+ *     <td>
+ *       {@link Map}&lt;{@link String}, V&gt; where V is the corresponding Java class for the
+ *       Avro map's values
+ *     </td>
+ *   </tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#NULL NULL}</td><td>{@link Object} (but will always be null)</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#RECORD RECORD}</td><td>{@link GenericRecord}</td></tr>
+ *   <tr><td>{@link org.apache.avro.Schema.Type#STRING STRING}</td><td>{@link String}</td></tr>
+ *   <tr>
+ *     <td>{@link org.apache.avro.Schema.Type#UNION UNION}</td>
+ *     <td>
+ *       The corresponding Java class for whichever schema is chosen to be generated out of the
+ *       ones present in the given Avro union.
+ *     </td>
+ *   </tr>
+ * </table>
+ */ + public Object generate() { + return generateObject(topLevelSchema); + } + + private Object generateObject(Schema schema) { + Map propertiesProp = getProperties(schema).orElse(Collections.emptyMap()); + if (propertiesProp.containsKey(OPTIONS_PROP)) { + return generateOption(schema, propertiesProp); + } + if (propertiesProp.containsKey(ITERATION_PROP)) { + return generateIteration(schema, propertiesProp); + } + switch (schema.getType()) { + case ARRAY: + return generateArray(schema, propertiesProp); + case BOOLEAN: + return generateBoolean(propertiesProp); + case BYTES: + return generateBytes(propertiesProp); + case DOUBLE: + return generateDouble(propertiesProp); + case ENUM: + return generateEnumSymbol(schema); + case FIXED: + return generateFixed(schema); + case FLOAT: + return generateFloat(propertiesProp); + case INT: + return generateInt(propertiesProp); + case LONG: + return generateLong(propertiesProp); + case MAP: + return generateMap(schema, propertiesProp); + case NULL: + return generateNull(); + case RECORD: + return generateRecord(schema); + case STRING: + return generateString(schema, propertiesProp); + case UNION: + return generateUnion(schema); + default: + throw new RuntimeException("Unrecognized schema type: " + schema.getType()); + } + } + + private Optional getProperties(Schema schema) { + Object propertiesProp = schema.getObjectProp(ARG_PROPERTIES_PROP); + if (propertiesProp == null) { + return Optional.empty(); + } else if (propertiesProp instanceof Map) { + return Optional.of((Map) propertiesProp); + } else { + throw new RuntimeException(String.format( + "%s property must be given as object, was %s instead", + ARG_PROPERTIES_PROP, + propertiesProp.getClass().getName() + )); + } + } + + private void enforceMutualExclusion( + Map propertiesProp, + String includedProp, + String... 
excludedProps) { + for (String excludedProp : excludedProps) { + if (propertiesProp.containsKey(excludedProp)) { + throw new RuntimeException(String.format( + "Cannot specify %s prop when %s prop is given", + excludedProp, + includedProp + )); + } + } + } + + @SuppressWarnings("unchecked") + private Object wrapOption(Schema schema, Object option) { + if (schema.getType() == Schema.Type.BYTES && option instanceof String) { + option = ByteBuffer.wrap(((String) option).getBytes(Charset.defaultCharset())); + } else if (schema.getType() == Schema.Type.FLOAT && option instanceof Double) { + option = ((Double) option).floatValue(); + } else if (schema.getType() == Schema.Type.LONG && option instanceof Integer) { + option = ((Integer) option).longValue(); + } else if (schema.getType() == Schema.Type.ARRAY && option instanceof Collection) { + option = new GenericData.Array(schema, (Collection) option); + } else if (schema.getType() == Schema.Type.ENUM && option instanceof String) { + option = new GenericData.EnumSymbol(schema, (String) option); + } else if (schema.getType() == Schema.Type.FIXED && option instanceof String) { + option = + new GenericData.Fixed(schema, ((String) option).getBytes(Charset.defaultCharset())); + } else if (schema.getType() == Schema.Type.RECORD && option instanceof Map) { + Map optionMap = (Map) option; + GenericRecordBuilder optionBuilder = new GenericRecordBuilder(schema); + for (Schema.Field field : schema.getFields()) { + if (optionMap.containsKey(field.name())) { + optionBuilder.set(field, optionMap.get(field.name())); + } + } + option = optionBuilder.build(); + } + return option; + } + + @SuppressWarnings("unchecked") + private List parseOptions(Schema schema, Map propertiesProp) { + enforceMutualExclusion( + propertiesProp, OPTIONS_PROP, + LENGTH_PROP, REGEX_PROP, ITERATION_PROP, RANGE_PROP + ); + + Object optionsProp = propertiesProp.get(OPTIONS_PROP); + if (optionsProp instanceof Collection) { + Collection optionsList = (Collection) optionsProp; + if (optionsList.isEmpty()) { + throw new RuntimeException(String.format( + "%s property cannot be empty", + OPTIONS_PROP + )); + } + List options = new ArrayList<>(); + for (Object option : optionsList) { + option = wrapOption(schema, option); + if (!GenericData.get().validate(schema, option)) { + throw new RuntimeException(String.format( + "Invalid option for %s schema: type %s, value '%s'", + schema.getType().getName(), + option.getClass().getName(), + option + )); + } + options.add(option); + } + return options; + } else if (optionsProp instanceof Map) { + Map optionsProps = (Map) optionsProp; + Object optionsFile = optionsProps.get(OPTIONS_PROP_FILE); + if (optionsFile == null) { + throw new RuntimeException(String.format( + "%s property must contain '%s' field when given as object", + OPTIONS_PROP, + OPTIONS_PROP_FILE + )); + } + if (!(optionsFile instanceof String)) { + throw new RuntimeException(String.format( + "'%s' field of %s property must be given as string, was %s instead", + OPTIONS_PROP_FILE, + OPTIONS_PROP, + optionsFile.getClass().getName() + )); + } + Object optionsEncoding = optionsProps.get(OPTIONS_PROP_ENCODING); + if (optionsEncoding == null) { + throw new RuntimeException(String.format( + "%s property must contain '%s' field when given as object", + OPTIONS_PROP, + OPTIONS_PROP_FILE + )); + } + if (!(optionsEncoding instanceof String)) { + throw new RuntimeException(String.format( + "'%s' field of %s property must be given as string, was %s instead", + OPTIONS_PROP_ENCODING, + OPTIONS_PROP, + 
optionsEncoding.getClass().getName() + )); + } + try (InputStream optionsStream = new FileInputStream((String) optionsFile)) { + DatumReader optionReader = new GenericDatumReader(schema); + Decoder decoder; + if ("binary".equals(optionsEncoding)) { + decoder = DecoderFactory.get().binaryDecoder(optionsStream, null); + } else if ("json".equals(optionsEncoding)) { + decoder = DecoderFactory.get().jsonDecoder(schema, optionsStream); + } else { + throw new RuntimeException(String.format( + "'%s' field of %s property only supports two formats: 'binary' and 'json'", + OPTIONS_PROP_ENCODING, + OPTIONS_PROP + )); + } + List options = new ArrayList<>(); + Object option = optionReader.read(null, decoder); + while (option != null) { + option = wrapOption(schema, option); + if (!GenericData.get().validate(schema, option)) { + throw new RuntimeException(String.format( + "Invalid option for %s schema: type %s, value '%s'", + schema.getType().getName(), + option.getClass().getName(), + option + )); + } + options.add(option); + try { + option = optionReader.read(null, decoder); + } catch (EOFException eofe) { + break; + } + } + return options; + } catch (FileNotFoundException fnfe) { + throw new RuntimeException( + String.format( + "Unable to locate options file '%s'", + optionsFile + ), + fnfe + ); + } catch (IOException ioe) { + throw new RuntimeException( + String.format( + "Unable to read options file '%s'", + optionsFile + ), + ioe + ); + } + } else { + throw new RuntimeException(String.format( + "%s prop must be an array or an object, was %s instead", + OPTIONS_PROP, + optionsProp.getClass().getName() + )); + } + } + + @SuppressWarnings("unchecked") + private T generateOption(Schema schema, Map propertiesProp) { + if (!optionsCache.containsKey(schema)) { + optionsCache.put(schema, parseOptions(schema, propertiesProp)); + } + List options = optionsCache.get(schema); + return (T) options.get(random.nextInt(options.size())); + } + + private Iterator getBooleanIterator(Map iterationProps) { + Object startProp = iterationProps.get(ITERATION_PROP_START); + if (startProp == null) { + throw new RuntimeException(String.format( + "%s property must contain %s field", + ITERATION_PROP, + ITERATION_PROP_START + )); + } + if (!(startProp instanceof Boolean)) { + throw new RuntimeException(String.format( + "%s field of %s property for a boolean schema must be a boolean, was %s instead", + ITERATION_PROP_START, + ITERATION_PROP, + startProp.getClass().getName() + )); + } + if (iterationProps.containsKey(ITERATION_PROP_RESTART)) { + throw new RuntimeException(String.format( + "%s property cannot contain %s field for a boolean schema", + ITERATION_PROP, + ITERATION_PROP_RESTART + )); + } + if (iterationProps.containsKey(ITERATION_PROP_STEP)) { + throw new RuntimeException(String.format( + "%s property cannot contain %s field for a boolean schema", + ITERATION_PROP, + ITERATION_PROP_STEP + )); + } + return new BooleanIterator((Boolean) startProp); + } + + private Iterator getIntegralIterator( + Long iterationStartField, + Long iterationRestartField, + Long iterationStepField, + IntegralIterator.Type type) { + + if (iterationStartField == null) { + throw new RuntimeException(String.format( + "%s property must contain %s field", + ITERATION_PROP, + ITERATION_PROP_START + )); + } + + long iterationStart = iterationStartField; + long iterationRestart; + long iterationStep; + + long restartHighDefault; + long restartLowDefault; + switch (type) { + case INTEGER: + restartHighDefault = Integer.MAX_VALUE; + restartLowDefault 
= Integer.MIN_VALUE; + break; + case LONG: + restartHighDefault = Long.MAX_VALUE; + restartLowDefault = Long.MIN_VALUE; + break; + default: + throw new RuntimeException(String.format( + "Unexpected IntegralIterator type: %s", + type + )); + } + + if (iterationRestartField == null && iterationStepField == null) { + iterationRestart = restartHighDefault; + iterationStep = 1; + } else if (iterationRestartField == null) { + iterationStep = iterationStepField; + if (iterationStep > 0) { + iterationRestart = restartHighDefault; + } else if (iterationStep < 0) { + iterationRestart = -1 * restartLowDefault; + } else { + throw new RuntimeException(String.format( + "%s field of %s property cannot be zero", + ITERATION_PROP_STEP, + ITERATION_PROP + )); + } + } else if (iterationStepField == null) { + iterationRestart = iterationRestartField; + if (iterationRestart > iterationStart) { + iterationStep = 1; + } else if (iterationRestart < iterationStart) { + iterationStep = -1; + } else { + throw new RuntimeException(String.format( + "%s and %s fields of %s property cannot be equal", + ITERATION_PROP_START, + ITERATION_PROP_RESTART, + ITERATION_PROP + )); + } + } else { + iterationRestart = iterationRestartField; + iterationStep = iterationStepField; + if (iterationStep == 0) { + throw new RuntimeException(String.format( + "%s field of %s property cannot be zero", + ITERATION_PROP_STEP, + ITERATION_PROP + )); + } + if (iterationStart == iterationRestart) { + throw new RuntimeException(String.format( + "%s and %s fields of %s property cannot be equal", + ITERATION_PROP_START, + ITERATION_PROP_RESTART, + ITERATION_PROP + )); + } + if (iterationRestart > iterationStart && iterationStep < 0) { + throw new RuntimeException(String.format( + "%s field of %s property must be positive when %s field is greater than %s field", + ITERATION_PROP_STEP, + ITERATION_PROP, + ITERATION_PROP_RESTART, + ITERATION_PROP_START + )); + } + if (iterationRestart < iterationStart && iterationStep > 0) { + throw new RuntimeException(String.format( + "%s field of %s property must be negative when %s field is less than %s field", + ITERATION_PROP_STEP, + ITERATION_PROP, + ITERATION_PROP_RESTART, + ITERATION_PROP_START + )); + } + } + + return new IntegralIterator( + iterationStart, + iterationRestart, + iterationStep, + type + ); + } + + private Iterator getDecimalIterator( + Double iterationStartField, + Double iterationRestartField, + Double iterationStepField, + DecimalIterator.Type type) { + + if (iterationStartField == null) { + throw new RuntimeException(String.format( + "%s property must contain %s field", + ITERATION_PROP, + ITERATION_PROP_START + )); + } + + double iterationStart = iterationStartField; + double iterationRestart; + double iterationStep; + + double restartHighDefault; + double restartLowDefault; + switch (type) { + case FLOAT: + restartHighDefault = Float.MAX_VALUE; + restartLowDefault = -1 * Float.MAX_VALUE; + break; + case DOUBLE: + restartHighDefault = Double.MAX_VALUE; + restartLowDefault = -1 * Double.MAX_VALUE; + break; + default: + throw new RuntimeException(String.format( + "Unexpected DecimalIterator type: %s", + type + )); + } + + if (iterationRestartField == null && iterationStepField == null) { + iterationRestart = restartHighDefault; + iterationStep = 1; + } else if (iterationRestartField == null) { + iterationStep = iterationStepField; + if (iterationStep > 0) { + iterationRestart = restartHighDefault; + } else if (iterationStep < 0) { + iterationRestart = -1 * restartLowDefault; + } else { + 
throw new RuntimeException(String.format( + "%s field of %s property cannot be zero", + ITERATION_PROP_STEP, + ITERATION_PROP + )); + } + } else if (iterationStepField == null) { + iterationRestart = iterationRestartField; + if (iterationRestart > iterationStart) { + iterationStep = 1; + } else if (iterationRestart < iterationStart) { + iterationStep = -1; + } else { + throw new RuntimeException(String.format( + "%s and %s fields of %s property cannot be equal", + ITERATION_PROP_START, + ITERATION_PROP_RESTART, + ITERATION_PROP + )); + } + } else { + iterationRestart = iterationRestartField; + iterationStep = iterationStepField; + if (iterationStep == 0) { + throw new RuntimeException(String.format( + "%s field of %s property cannot be zero", + ITERATION_PROP_STEP, + ITERATION_PROP + )); + } + if (iterationStart == iterationRestart) { + throw new RuntimeException(String.format( + "%s and %s fields of %s property cannot be equal", + ITERATION_PROP_START, + ITERATION_PROP_RESTART, + ITERATION_PROP + )); + } + if (iterationRestart > iterationStart && iterationStep < 0) { + throw new RuntimeException(String.format( + "%s field of %s property must be positive when %s field is greater than %s field", + ITERATION_PROP_STEP, + ITERATION_PROP, + ITERATION_PROP_RESTART, + ITERATION_PROP_START + )); + } + if (iterationRestart < iterationStart && iterationStep > 0) { + throw new RuntimeException(String.format( + "%s field of %s property must be negative when %s field is less than %s field", + ITERATION_PROP_STEP, + ITERATION_PROP, + ITERATION_PROP_RESTART, + ITERATION_PROP_START + )); + } + } + + return new DecimalIterator( + iterationStart, + iterationRestart, + iterationStep, + type + ); + } + + private Iterator parseIterations(Schema schema, Map propertiesProp) { + enforceMutualExclusion( + propertiesProp, ITERATION_PROP, + LENGTH_PROP, REGEX_PROP, OPTIONS_PROP, RANGE_PROP + ); + + Object iterationProp = propertiesProp.get(ITERATION_PROP); + if (iterationProp instanceof Map) { + Map iterationProps = (Map) iterationProp; + switch (schema.getType()) { + case BOOLEAN: + return getBooleanIterator(iterationProps); + case INT: { + Integer iterationStartField = getIntegerNumberField( + ITERATION_PROP, + ITERATION_PROP_START, + iterationProps + ); + Integer iterationRestartField = getIntegerNumberField( + ITERATION_PROP, + ITERATION_PROP_RESTART, + iterationProps + ); + Integer iterationStepField = getIntegerNumberField( + ITERATION_PROP, + ITERATION_PROP_STEP, + iterationProps + ); + return getIntegralIterator( + iterationStartField != null ? iterationStartField.longValue() : null, + iterationRestartField != null ? iterationRestartField.longValue() : null, + iterationStepField != null ? 
iterationStepField.longValue() : null, + IntegralIterator.Type.INTEGER + ); + } + case LONG: { + Long iterationStartField = getIntegralNumberField( + ITERATION_PROP, + ITERATION_PROP_START, + iterationProps + ); + Long iterationRestartField = getIntegralNumberField( + ITERATION_PROP, + ITERATION_PROP_RESTART, + iterationProps + ); + Long iterationStepField = getIntegralNumberField( + ITERATION_PROP, + ITERATION_PROP_STEP, + iterationProps + ); + return getIntegralIterator( + iterationStartField, + iterationRestartField, + iterationStepField, + IntegralIterator.Type.LONG + ); + } + case FLOAT: { + Float iterationStartField = getFloatNumberField( + ITERATION_PROP, + ITERATION_PROP_START, + iterationProps + ); + Float iterationRestartField = getFloatNumberField( + ITERATION_PROP, + ITERATION_PROP_RESTART, + iterationProps + ); + Float iterationStepField = getFloatNumberField( + ITERATION_PROP, + ITERATION_PROP_STEP, + iterationProps + ); + return getDecimalIterator( + iterationStartField != null ? iterationStartField.doubleValue() : null, + iterationRestartField != null ? iterationRestartField.doubleValue() : null, + iterationStepField != null ? iterationStepField.doubleValue() : null, + DecimalIterator.Type.FLOAT + ); + } + case DOUBLE: { + Double iterationStartField = getDecimalNumberField( + ITERATION_PROP, + ITERATION_PROP_START, + iterationProps + ); + Double iterationRestartField = getDecimalNumberField( + ITERATION_PROP, + ITERATION_PROP_RESTART, + iterationProps + ); + Double iterationStepField = getDecimalNumberField( + ITERATION_PROP, + ITERATION_PROP_STEP, + iterationProps + ); + return getDecimalIterator( + iterationStartField, + iterationRestartField, + iterationStepField, + DecimalIterator.Type.DOUBLE + ); + } + default: + throw new UnsupportedOperationException(String.format( + "%s property can only be specified on numeric and boolean schemas, not %s schema", + ITERATION_PROP, + schema.getType().toString() + )); + } + } else { + throw new RuntimeException(String.format( + "%s prop must be an object, was %s instead", + ITERATION_PROP, + iterationProp.getClass().getName() + )); + } + } + + @SuppressWarnings("unchecked") + private T generateIteration(Schema schema, Map propertiesProp) { + if (!iteratorCache.containsKey(schema)) { + iteratorCache.put(schema, parseIterations(schema, propertiesProp)); + } + return (T) iteratorCache.get(schema).next(); + } + + private Collection generateArray(Schema schema, Map propertiesProp) { + int length = getLengthBounds(propertiesProp).random(); + Collection result = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + result.add(generateObject(schema.getElementType())); + } + return result; + } + + private Boolean generateBoolean(Map propertiesProp) { + Double odds = getDecimalNumberField(ARG_PROPERTIES_PROP, ODDS_PROP, propertiesProp); + if (odds == null) { + return random.nextBoolean(); + } else { + if (odds < 0.0 || odds > 1.0) { + throw new RuntimeException(String.format( + "%s property must be in the range [0.0, 1.0]", + ODDS_PROP + )); + } + return random.nextDouble() < odds; + } + } + + private ByteBuffer generateBytes(Map propertiesProp) { + byte[] bytes = new byte[getLengthBounds(propertiesProp.get(LENGTH_PROP)).random()]; + random.nextBytes(bytes); + return ByteBuffer.wrap(bytes); + } + + private Double generateDouble(Map propertiesProp) { + Object rangeProp = propertiesProp.get(RANGE_PROP); + if (rangeProp != null) { + if (rangeProp instanceof Map) { + Map rangeProps = (Map) rangeProp; + Double rangeMinField = 
getDecimalNumberField(RANGE_PROP, RANGE_PROP_MIN, rangeProps); + Double rangeMaxField = getDecimalNumberField(RANGE_PROP, RANGE_PROP_MAX, rangeProps); + double rangeMin = rangeMinField != null ? rangeMinField : -1 * Double.MAX_VALUE; + double rangeMax = rangeMaxField != null ? rangeMaxField : Double.MAX_VALUE; + if (rangeMin >= rangeMax) { + throw new RuntimeException(String.format( + "'%s' field must be strictly less than '%s' field in %s property", + RANGE_PROP_MIN, + RANGE_PROP_MAX, + RANGE_PROP + )); + } + return rangeMin + (random.nextDouble() * (rangeMax - rangeMin)); + } else { + throw new RuntimeException(String.format( + "%s property must be an object", + RANGE_PROP + )); + } + } + return random.nextDouble(); + } + + private GenericEnumSymbol generateEnumSymbol(Schema schema) { + List enums = schema.getEnumSymbols(); + return new + GenericData.EnumSymbol(schema, enums.get(random.nextInt(enums.size()))); + } + + private GenericFixed generateFixed(Schema schema) { + byte[] bytes = new byte[schema.getFixedSize()]; + random.nextBytes(bytes); + return new GenericData.Fixed(schema, bytes); + } + + private Float generateFloat(Map propertiesProp) { + Object rangeProp = propertiesProp.get(RANGE_PROP); + if (rangeProp != null) { + if (rangeProp instanceof Map) { + Map rangeProps = (Map) rangeProp; + Float rangeMinField = getFloatNumberField( + RANGE_PROP, + RANGE_PROP_MIN, + rangeProps + ); + Float rangeMaxField = getFloatNumberField( + RANGE_PROP, + RANGE_PROP_MAX, + rangeProps + ); + float rangeMin = Optional.ofNullable(rangeMinField).orElse(-1 * Float.MAX_VALUE); + float rangeMax = Optional.ofNullable(rangeMaxField).orElse(Float.MAX_VALUE); + if (rangeMin >= rangeMax) { + throw new RuntimeException(String.format( + "'%s' field must be strictly less than '%s' field in %s property", + RANGE_PROP_MIN, + RANGE_PROP_MAX, + RANGE_PROP + )); + } + return rangeMin + (random.nextFloat() * (rangeMax - rangeMin)); + } + } + return random.nextFloat(); + } + + private Integer generateInt(Map propertiesProp) { + Object rangeProp = propertiesProp.get(RANGE_PROP); + if (rangeProp != null) { + if (rangeProp instanceof Map) { + Map rangeProps = (Map) rangeProp; + Integer rangeMinField = getIntegerNumberField(RANGE_PROP, RANGE_PROP_MIN, rangeProps); + Integer rangeMaxField = getIntegerNumberField(RANGE_PROP, RANGE_PROP_MAX, rangeProps); + int rangeMin = Optional.ofNullable(rangeMinField).orElse(Integer.MIN_VALUE); + int rangeMax = Optional.ofNullable(rangeMaxField).orElse(Integer.MAX_VALUE); + if (rangeMin >= rangeMax) { + throw new RuntimeException(String.format( + "'%s' field must be strictly less than '%s' field in %s property", + RANGE_PROP_MIN, + RANGE_PROP_MAX, + RANGE_PROP + )); + } + return rangeMin + ((int) (random.nextDouble() * (rangeMax - rangeMin))); + } + } + return random.nextInt(); + } + + private Long generateLong(Map propertiesProp) { + Object rangeProp = propertiesProp.get(RANGE_PROP); + if (rangeProp != null) { + if (rangeProp instanceof Map) { + Map rangeProps = (Map) rangeProp; + Long rangeMinField = getIntegralNumberField(RANGE_PROP, RANGE_PROP_MIN, rangeProps); + Long rangeMaxField = getIntegralNumberField(RANGE_PROP, RANGE_PROP_MAX, rangeProps); + long rangeMin = Optional.ofNullable(rangeMinField).orElse(Long.MIN_VALUE); + long rangeMax = Optional.ofNullable(rangeMaxField).orElse(Long.MAX_VALUE); + if (rangeMin >= rangeMax) { + throw new RuntimeException(String.format( + "'%s' field must be strictly less than '%s' field in %s property", + RANGE_PROP_MIN, + RANGE_PROP_MAX, + 
RANGE_PROP + )); + } + return rangeMin + (((long) (random.nextDouble() * (rangeMax - rangeMin)))); + } + } + return random.nextLong(); + } + + private Map generateMap(Schema schema, Map propertiesProp) { + Map result = new HashMap<>(); + int length = getLengthBounds(propertiesProp).random(); + Object keyProp = propertiesProp.get(KEYS_PROP); + if (keyProp == null) { + for (int i = 0; i < length; i++) { + result.put(generateRandomString(1), generateObject(schema.getValueType())); + } + } else if (keyProp instanceof Map) { + Map keyPropMap = (Map) keyProp; + if (keyPropMap.containsKey(OPTIONS_PROP)) { + if (!optionsCache.containsKey(schema)) { + optionsCache.put(schema, parseOptions(Schema.create(Schema.Type.STRING), keyPropMap)); + } + for (int i = 0; i < length; i++) { + result.put(generateOption(schema, keyPropMap), generateObject(schema.getValueType())); + } + } else { + int keyLength = getLengthBounds(keyPropMap.get(LENGTH_PROP)).random(); + for (int i = 0; i < length; i++) { + result.put( + generateRandomString(keyLength), + generateObject(schema.getValueType()) + ); + } + } + } else { + throw new RuntimeException(String.format( + "%s prop must be an object", + KEYS_PROP + )); + } + return result; + } + + private Object generateNull() { + return null; + } + + private GenericRecord generateRecord(Schema schema) { + GenericRecordBuilder builder = new GenericRecordBuilder(schema); + for (Schema.Field field : schema.getFields()) { + builder.set(field, generateObject(field.schema())); + } + return builder.build(); + } + + @SuppressWarnings("unchecked") + private String generateRegexString(Schema schema, Object regexProp, LengthBounds lengthBounds) { + if (!generexCache.containsKey(schema)) { + if (!(regexProp instanceof String)) { + throw new RuntimeException(String.format("%s property must be a string", REGEX_PROP)); + } + generexCache.put(schema, new Generex((String) regexProp)); + } + // Generex.random(low, high) generates in range [low, high]; we want [low, high), so subtract + // 1 from maxLength + return generexCache.get(schema).random(lengthBounds.min(), lengthBounds.max() - 1); + } + + private String generateRandomString(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; i++) { + bytes[i] = (byte) random.nextInt(128); + } + return new String(bytes, StandardCharsets.US_ASCII); + } + + private String generateString(Schema schema, Map propertiesProp) { + Object regexProp = propertiesProp.get(REGEX_PROP); + if (regexProp != null) { + return generateRegexString(schema, regexProp, getLengthBounds(propertiesProp)); + } else { + return generateRandomString(getLengthBounds(propertiesProp).random()); + } + } + + private Object generateUnion(Schema schema) { + List schemas = schema.getTypes(); + return generateObject(schemas.get(random.nextInt(schemas.size()))); + } + + private LengthBounds getLengthBounds(Map propertiesProp) { + return getLengthBounds(propertiesProp.get(LENGTH_PROP)); + } + + private LengthBounds getLengthBounds(Object lengthProp) { + if (lengthProp == null) { + return new LengthBounds(); + } else if (lengthProp instanceof Integer) { + Integer length = (Integer) lengthProp; + if (length < 0) { + throw new RuntimeException(String.format( + "when given as integral number, %s property cannot be negative", + LENGTH_PROP + )); + } + return new LengthBounds(length); + } else if (lengthProp instanceof Map) { + Map lengthProps = (Map) lengthProp; + Integer minLength = getIntegerNumberField(LENGTH_PROP, LENGTH_PROP_MIN, lengthProps); + Integer maxLength = 
getIntegerNumberField(LENGTH_PROP, LENGTH_PROP_MAX, lengthProps); + if (minLength == null && maxLength == null) { + throw new RuntimeException(String.format( + "%s property must contain at least one of '%s' or '%s' fields when given as object", + LENGTH_PROP, + LENGTH_PROP_MIN, + LENGTH_PROP_MAX + )); + } + minLength = minLength != null ? minLength : 0; + maxLength = maxLength != null ? maxLength : Integer.MAX_VALUE; + if (minLength < 0) { + throw new RuntimeException(String.format( + "%s field of %s property cannot be negative", + LENGTH_PROP_MIN, + LENGTH_PROP + )); + } + if (maxLength <= minLength) { + throw new RuntimeException(String.format( + "%s field must be strictly greater than %s field for %s property", + LENGTH_PROP_MAX, + LENGTH_PROP_MIN, + LENGTH_PROP + )); + } + return new LengthBounds(minLength, maxLength); + } else { + throw new RuntimeException(String.format( + "%s property must either be an integral number or an object, was %s instead", + LENGTH_PROP, + lengthProp.getClass().getName() + )); + } + } + + private Integer getIntegerNumberField(String property, String field, Map propsMap) { + Long result = getIntegralNumberField(property, field, propsMap); + if (result != null && (result < Integer.MIN_VALUE || result > Integer.MAX_VALUE)) { + throw new RuntimeException(String.format( + "'%s' field of %s property must be a valid int for int schemas", + field, + property + )); + } + return result != null ? result.intValue() : null; + } + + private Long getIntegralNumberField(String property, String field, Map propsMap) { + Object result = propsMap.get(field); + if (result == null || result instanceof Long) { + return (Long) result; + } else if (result instanceof Integer) { + return ((Integer) result).longValue(); + } else { + throw new RuntimeException(String.format( + "'%s' field of %s property must be an integral number, was %s instead", + field, + property, + result.getClass().getName() + )); + } + } + + private Float getFloatNumberField(String property, String field, Map propsMap) { + Double result = getDecimalNumberField(property, field, propsMap); + if (result != null && (result > Float.MAX_VALUE || result < -1 * Float.MIN_VALUE)) { + throw new RuntimeException(String.format( + "'%s' field of %s property must be a valid float for float schemas", + field, + property + )); + } + return result != null ? 
result.floatValue() : null; + } + + private Double getDecimalNumberField(String property, String field, Map propsMap) { + Object result = propsMap.get(field); + if (result == null || result instanceof Double) { + return (Double) result; + } else if (result instanceof Float) { + return ((Float) result).doubleValue(); + } else if (result instanceof Integer) { + return ((Integer) result).doubleValue(); + } else if (result instanceof Long) { + return ((Long) result).doubleValue(); + } else { + throw new RuntimeException(String.format( + "'%s' field of %s property must be a number, was %s instead", + field, + property, + result.getClass().getName() + )); + } + } + + private class LengthBounds { + public static final int DEFAULT_MIN = 8; + public static final int DEFAULT_MAX = 16; + + private final int min; + private final int max; + + public LengthBounds(int min, int max) { + this.min = min; + this.max = max; + } + + public LengthBounds(int exact) { + this(exact, exact + 1); + } + + public LengthBounds() { + this(DEFAULT_MIN, DEFAULT_MAX); + } + + public int random() { + return min + random.nextInt(max - min); + } + + public int min() { + return min; + } + + public int max() { + return max; + } + } + + private static class IntegralIterator implements Iterator { + public enum Type { + INTEGER, LONG + } + + private final long start; + private final long restart; + private final long step; + private final Type type; + private long current; + + public IntegralIterator(long start, long restart, long step, Type type) { + this.start = start; + this.restart = restart; + this.step = step; + this.type = type; + current = start; + } + + @Override + public Object next() { + long result = current; + if ((step > 0 && current >= restart - step) || (step < 0 && current <= restart - step)) { + current = start + modulo(step - (restart - current), restart - start); + } else { + current += step; + } + switch (type) { + case INTEGER: + return (int) result; + case LONG: + return result; + default: + throw new RuntimeException(String.format("Unexpected Type: %s", type)); + } + } + + @Override + public boolean hasNext() { + return true; + } + + // first % second, but with first guarantee that the result will always have the same sign as + // second + private static long modulo(long first, long second) { + return ((first % second) + second) % second; + } + } + + private static class DecimalIterator implements Iterator { + public enum Type { + FLOAT, DOUBLE + } + + private final double start; + private final double restart; + private final double step; + private final Type type; + private double current; + + public DecimalIterator(double start, double restart, double step, Type type) { + this.start = start; + this.restart = restart; + this.step = step; + this.type = type; + current = start; + } + + @Override + public Object next() { + double result = current; + if ((step > 0 && current >= restart - step) || (step < 0 && current <= restart - step)) { + current = start + modulo(step - (restart - current), restart - start); + } else { + current += step; + } + switch (type) { + case FLOAT: + return (float) result; + case DOUBLE: + return result; + default: + throw new RuntimeException(String.format("Unexpected Type: %s", type)); + } + } + + @Override + public boolean hasNext() { + return true; + } + + // first % second, but with first guarantee that the result will always have the same sign as + // second + private static double modulo(double first, double second) { + return ((first % second) + second) % second; + } + } + + 
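+  // Editor's illustrative note (an addition, not part of the original patch): the wraparound
+  // arithmetic in IntegralIterator/DecimalIterator is easiest to see with a concrete example.
+  // Assuming a hypothetical int schema annotated with
+  //   "arg.properties": {"iteration": {"start": 0, "restart": 10, "step": 3}}
+  // the generator yields 0, 3, 6, 9, then wraps to
+  //   start + modulo(step - (restart - current), restart - start) = 0 + ((3 - (10 - 9)) % 10) = 2,
+  // and continues 2, 5, 8, 1, 4, 7, 0, ... so every generated value stays in [start, restart).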
private static class BooleanIterator implements Iterator { + private boolean current; + + public BooleanIterator(boolean start) { + current = start; + } + + @Override + public Boolean next() { + boolean result = current; + current = !current; + return result; + } + + @Override + public boolean hasNext() { + return true; + } + } +} diff --git a/ksql-examples/src/main/java/io/confluent/avro/random/generator/Main.java b/ksql-examples/src/main/java/io/confluent/avro/random/generator/Main.java new file mode 100644 index 000000000000..de74173a5abe --- /dev/null +++ b/ksql-examples/src/main/java/io/confluent/avro/random/generator/Main.java @@ -0,0 +1,299 @@ +package io.confluent.avro.random.generator; + +/** + * Created by hojjat on 8/15/17. + */ + + import org.apache.avro.generic.GenericDatumWriter; + import org.apache.avro.io.Encoder; + import org.apache.avro.io.EncoderFactory; + + import java.io.File; + import java.io.FileOutputStream; + import java.io.IOException; + import java.io.OutputStream; + import java.util.Arrays; + import java.util.Iterator; + import java.util.Random; + +/* TODO: Find a good argument parser that doesn't strip double quotes off of arguments and allows + for mutually exclusive options to cancel each other out without error */ +public class Main { + public static final String PROGRAM_NAME = "arg"; + + public static final String SCHEMA_SHORT_FLAG = "-s"; + public static final String SCHEMA_LONG_FLAG = "--schema"; + + public static final String SCHEMA_FILE_SHORT_FLAG = "-f"; + public static final String SCHEMA_FILE_LONG_FLAG = "--schema-file"; + + public static final String PRETTY_SHORT_FLAG = "-p"; + public static final String PRETTY_LONG_FLAG = "--pretty"; + + public static final String COMPACT_SHORT_FLAG = "-c"; + public static final String COMPACT_LONG_FLAG = "--compact"; + + public static final String JSON_SHORT_FLAG = "-j"; + public static final String JSON_LONG_FLAG = "--json"; + + public static final String BINARY_SHORT_FLAG = "-b"; + public static final String BINARY_LONG_FLAG = "--binary"; + + public static final String ITERATIONS_SHORT_FLAG = "-i"; + public static final String ITERATIONS_LONG_FLAG = "--iterations"; + + public static final String OUTPUT_FILE_SHORT_FLAG = "-o"; + public static final String OUTPUT_FILE_LONG_FLAG = "--output"; + + public static final String HELP_SHORT_FLAG_1 = "-?"; + public static final String HELP_SHORT_FLAG_2 = "-h"; + public static final String HELP_LONG_FLAG = "--help"; + + private static final boolean PRETTY_FORMAT = true; + private static final boolean COMPACT_FORMAT = false; + + private static final boolean JSON_ENCODING = true; + private static final boolean BINARY_ENCODING = false; + + /** + * Parses options passed in via the args argument to main() and then leverages a new + * {@link Generator} object to produce randomized output according to the parsed options. 
+ */ + public static void main(String[] args) { + String schema = null; + String schemaFile = "-"; + + boolean jsonFormat = PRETTY_FORMAT; + + boolean encoding = JSON_ENCODING; + + long iterations = 1; + String outputFile = null; + + Iterator argv = Arrays.asList(args).iterator(); + while (argv.hasNext()) { + String flag = argv.next(); + switch (flag) { + case SCHEMA_SHORT_FLAG: + case SCHEMA_LONG_FLAG: + schemaFile = null; + schema = nextArg(argv, flag); + break; + case SCHEMA_FILE_SHORT_FLAG: + case SCHEMA_FILE_LONG_FLAG: + schema = null; + schemaFile = nextArg(argv, flag); + break; + case PRETTY_SHORT_FLAG: + case PRETTY_LONG_FLAG: + jsonFormat = PRETTY_FORMAT; + break; + case COMPACT_SHORT_FLAG: + case COMPACT_LONG_FLAG: + jsonFormat = COMPACT_FORMAT; + break; + case JSON_SHORT_FLAG: + case JSON_LONG_FLAG: + encoding = JSON_ENCODING; + break; + case BINARY_SHORT_FLAG: + case BINARY_LONG_FLAG: + encoding = BINARY_ENCODING; + break; + case ITERATIONS_SHORT_FLAG: + case ITERATIONS_LONG_FLAG: + iterations = parseIterations(nextArg(argv, flag), flag); + break; + case OUTPUT_FILE_SHORT_FLAG: + case OUTPUT_FILE_LONG_FLAG: + outputFile = nextArg(argv, flag); + break; + case HELP_SHORT_FLAG_1: + case HELP_SHORT_FLAG_2: + case HELP_LONG_FLAG: + usage(); + break; + default: + System.err.printf("%s: %s: unrecognized option%n%n", PROGRAM_NAME, flag); + usage(1); + } + } + + Generator generator = null; + try { + generator = getGenerator(schema, schemaFile); + } catch (IOException ioe) { + System.err.println("Error occurred while trying to read schema file"); + System.exit(1); + } + + try (OutputStream output = getOutput(outputFile)) { + Encoder encoder; + + if (encoding == JSON_ENCODING) { + encoder = EncoderFactory.get().jsonEncoder(generator.schema(), output, jsonFormat); + } else { + encoder = EncoderFactory.get().binaryEncoder(output, null); + } + + GenericDatumWriter objectWriter = new GenericDatumWriter<>(generator.schema()); + for (int i = 0; i < iterations; i++) { + objectWriter.write(generator.generate(), encoder); + } + encoder.flush(); + output.write('\n'); + } catch (IOException ioe) { + System.err.println("Error occurred while trying to write to output file"); + System.exit(1); + } + } + + private static long parseIterations(String arg, String flag) { + try { + long result = Long.parseLong(arg); + if (result < 0) { + System.err.printf("%s: %s: argument cannot be negative%n", PROGRAM_NAME, flag); + usage(1); + } + return result; + } catch (NumberFormatException nfe) { + System.err.printf("%s: %s: argument must be a number%n", PROGRAM_NAME, flag); + usage(1); + } + System.err.println( + "This statement was put in to make the compiler happy." + + " If you are seeing it, something has gone very wrong." 
+ ); + System.exit(1); + return 0L; + } + + private static String nextArg(Iterator argv, String flag) { + if (!argv.hasNext()) { + System.err.printf("%s: %s: argument required%n", PROGRAM_NAME, flag); + usage(1); + } + return argv.next(); + } + + private static void usage() { + usage(0); + } + + private static void usage(int exitValue) { + String header = String.format("%s: Generate random Avro data%n", PROGRAM_NAME); + + String summary = String.format( + "Usage: %s [%s | %s ] [%s | %s] [%s | %s] [%s ] [%s ]%n%n", + PROGRAM_NAME, + SCHEMA_FILE_SHORT_FLAG, + SCHEMA_SHORT_FLAG, + JSON_SHORT_FLAG, + BINARY_SHORT_FLAG, + PRETTY_SHORT_FLAG, + COMPACT_SHORT_FLAG, + ITERATIONS_SHORT_FLAG, + OUTPUT_FILE_SHORT_FLAG + ); + + final String indentation = " "; + final String separation = "\t"; + String flags = + "Flags:\n" + + String.format( + "%s%s, %s, %s:%s%s%n", + indentation, + HELP_SHORT_FLAG_1, + HELP_SHORT_FLAG_2, + HELP_LONG_FLAG, + separation, + "Print a brief usage summary and exit with status 0" + ) + String.format( + "%s%s, %s:%s%s%n", + indentation, + BINARY_SHORT_FLAG, + BINARY_LONG_FLAG, + separation, + "Encode outputted data in binary format" + ) + String.format( + "%s%s, %s:%s%s%n", + indentation, + COMPACT_SHORT_FLAG, + COMPACT_LONG_FLAG, + separation, + "Output each record on a single line of its own (has no effect if encoding is not JSON)" + ) + String.format( + "%s%s , %s :%s%s%n", + indentation, + SCHEMA_FILE_SHORT_FLAG, + SCHEMA_FILE_LONG_FLAG, + separation, + "Read the schema to spoof from , or stdin if is '-' (default is '-')" + ) + String.format( + "%s%s , %s :%s%s%n", + indentation, + ITERATIONS_SHORT_FLAG, + ITERATIONS_LONG_FLAG, + separation, + "Output iterations of spoofed data (default is 1)" + ) + String.format( + "%s%s, %s:%s%s%n", + indentation, + JSON_SHORT_FLAG, + JSON_LONG_FLAG, + separation, + "Encode outputted data in JSON format (default)" + ) + String.format( + "%s%s , %s :%s%s%n", + indentation, + OUTPUT_FILE_SHORT_FLAG, + OUTPUT_FILE_LONG_FLAG, + separation, + "Write data to the file , or stdout if is '-' (default is '-')" + ) + String.format( + "%s%s, %s:%s%s%n", + indentation, + PRETTY_SHORT_FLAG, + PRETTY_LONG_FLAG, + separation, + "Output each record in prettified format (has no effect if encoding is not JSON)" + + "(default)" + ) + String.format( + "%s%s , %s :%s%s%n", + indentation, + SCHEMA_SHORT_FLAG, + SCHEMA_LONG_FLAG, + separation, + "Spoof the schema " + ) + "\n"; + + String footer = String.format( + "%s%n%s%n", + "Currently on Chris Egerton's public GitHub:", + "https://github.com/C0urante/avro-random-generator" + ); + + System.err.printf(header + summary + flags + footer); + System.exit(exitValue); + } + + private static Generator getGenerator(String schema, String schemaFile) throws IOException { + Random random = new Random(); + if (schema != null) { + return new Generator(schema, random); + } else if (!schemaFile.equals("-")) { + return new Generator(new File(schemaFile), random); + } else { + System.err.println("Reading schema from stdin..."); + return new Generator(System.in, random); + } + } + + private static OutputStream getOutput(String outputFile) throws IOException { + if (outputFile != null && !outputFile.equals("-")) { + return new FileOutputStream(outputFile); + } else { + return System.out; + } + } +} \ No newline at end of file diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/AvroConsumer.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/AvroConsumer.java new file mode 100644 index 
000000000000..2ba22cb9f714 --- /dev/null +++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/AvroConsumer.java @@ -0,0 +1,142 @@ +/** + * Copyright 2017 Confluent Inc. + **/ +package io.confluent.ksql.datagen; + +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.serde.avro.KsqlGenericRowAvroDeserializer; +import io.confluent.ksql.serde.avro.KsqlGenericRowAvroSerializer; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.serialization.Serializer; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KStreamBuilder; +import org.apache.kafka.streams.kstream.KeyValueMapper; + +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +public class AvroConsumer { + + static String schemaStr = "{" + + "\"namespace\": \"ksql\"," + + " \"name\": \"orders\"," + + " \"type\": \"record\"," + + " \"fields\": [" + + " {\"name\": \"ordertime\", \"type\": \"long\"}," + + " {\"name\": \"orderid\", \"type\": \"string\"}," + + " {\"name\": \"itemid\", \"type\": \"string\"}," + + " {\"name\": \"orderunits\", \"type\": \"double\"}" + + " ]" + + "}"; + + static Serde genericRowSerde = null; + + private static Serde getGenericRowSerde() { + if (genericRowSerde == null) { + Map serdeProps = new HashMap<>(); + serdeProps.put(KsqlGenericRowAvroSerializer.AVRO_SERDE_SCHEMA_CONFIG, schemaStr); + + final Serializer genericRowSerializer = new KsqlGenericRowAvroSerializer(null); + genericRowSerializer.configure(serdeProps, false); + + final Deserializer genericRowDeserializer = new KsqlGenericRowAvroDeserializer(null); + genericRowDeserializer.configure(serdeProps, false); + + genericRowSerde = Serdes.serdeFrom(genericRowSerializer, genericRowDeserializer); + } + return genericRowSerde; + } + + public void printGenericRowTopic(String topicName) { + + Properties props = new Properties(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, + "StreamExampleGenericRowProcessor-" + System.currentTimeMillis()); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 0); + props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0); + + // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data + props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + + KStreamBuilder builder = new KStreamBuilder(); + + KStream + source = builder.stream(Serdes.String(), getGenericRowSerde(), topicName); + + source.print(); + + KafkaStreams streams = new KafkaStreams(builder, props); + streams.start(); + + // usually the stream application would be running forever, + // in this example we just let it run for some time and stop since the input data is finite. 
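+    // Editor's note (added comment): KafkaStreams#start() returns immediately and the stream
+    // threads run in the background, so the sleep below only gives the topology time to consume
+    // and print records before the demo closes the instance.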
+ try { + Thread.sleep(10000L); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + streams.close(); + streams.cleanUp(); + } + + public void processGenericRow() { + + Properties props = new Properties(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "StreamExample1-GenericRow-Processor"); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + + // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data + props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 0); + props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0); + + KStreamBuilder builder = new KStreamBuilder(); + + KStream + source = + builder.stream(Serdes.String(), genericRowSerde, "StreamExample1-GenericRow-order"); + + source.map(new KSQLPrintKeyValueMapper()); + + KafkaStreams streams = new KafkaStreams(builder, props); + streams.start(); + + // usually the stream application would be running forever, + // in this example we just let it run for some time and stop since the input data is finite. + try { + Thread.sleep(1000L); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + streams.close(); + streams.cleanUp(); + } + + class KSQLPrintKeyValueMapper + implements KeyValueMapper> { + + public KSQLPrintKeyValueMapper() { + } + + @Override + public KeyValue apply(String key, GenericRow row) { + System.out.println(row); + return new KeyValue(key, row); + } + } + + public static void main(String[] args) { + new AvroConsumer().printGenericRowTopic("bigorders_topic"); + } + +} diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/AvroProducer.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/AvroProducer.java new file mode 100644 index 000000000000..715ff2eaba0d --- /dev/null +++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/AvroProducer.java @@ -0,0 +1,28 @@ +/** + * Copyright 2017 Confluent Inc. + **/ +package io.confluent.ksql.datagen; + +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.serde.avro.KsqlGenericRowAvroSerializer; +import org.apache.avro.Schema; +import org.apache.kafka.common.serialization.Serializer; + +import java.util.HashMap; +import java.util.Map; + +public class AvroProducer extends DataGenProducer { + + @Override + protected Serializer getSerializer( + Schema avroSchema, + org.apache.kafka.connect.data.Schema kafkaSchema, + String topicName + ) { + Serializer result = new KsqlGenericRowAvroSerializer(kafkaSchema); + Map serializerConfiguration = new HashMap<>(); + serializerConfiguration.put(KsqlGenericRowAvroSerializer.AVRO_SERDE_SCHEMA_CONFIG, avroSchema.toString()); + result.configure(serializerConfiguration, false); + return result; + } +} \ No newline at end of file diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGen.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGen.java new file mode 100644 index 000000000000..25b79f8c7867 --- /dev/null +++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGen.java @@ -0,0 +1,344 @@ +/** + * Copyright 2017 Confluent Inc. 
+ */ +package io.confluent.ksql.datagen; + +import io.confluent.avro.random.generator.Generator; + +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Objects; +import java.util.Optional; +import java.util.Properties; +import java.util.Random; + +public class DataGen { + + public static void main(String[] args) { + Arguments arguments; + + try { + arguments = new Arguments.Builder().parseArgs(args).build(); + } catch (Arguments.ArgumentParseException exception) { + System.err.println(exception.getMessage()); + usage(1); + return; + } catch (IOException exception) { + System.err.printf("IOException encountered: %s%n", exception.getMessage()); + return; + } + + if (arguments.help) { + usage(0); + } + + + Generator generator; + try { + generator = new Generator(arguments.schemaFile, new Random()); + } catch (IOException exception) { + System.err.printf("IOException encountered: %s%n", exception.getMessage()); + return; + } + DataGenProducer dataProducer; + + switch (arguments.format) { + case AVRO: + dataProducer = new AvroProducer(); + break; + case JSON: + dataProducer = new JsonProducer(); + break; + case DELIMITED: + dataProducer = new DelimitedProducer(); + break; + default: + System.err.printf("Invalid format in '%s'; was expecting one of AVRO, JSON, or DELIMITED%n", arguments.format); + usage(1); + return; + } + + Properties props = new Properties(); + props.put("bootstrap.servers", arguments.bootstrapServer); + props.put("client.id", "KSQLDataGenProducer"); + + dataProducer.populateTopic(props, generator, arguments.topicName, arguments.keyName, + arguments.iterations, arguments.maxInterval); + } + + private static void usage() { + System.err.println( + "usage: DataGen " + + "[help] " + + "[bootstrap-server= (defaults to localhost:9092)] " + + "[quickstart= (case-insensitive; one of 'orders', 'users', or " + + "'pageviews')] " + + "schema= " + + "format= (case-insensitive; one of 'avro', 'json', or 'delimited') " + + "topic= " + + "key= " + + "[iterations= (defaults to 1,000,000)] " + + "[maxInterval= (defaults to 500)]" + ); + } + + private static void usage(int exitValue) { + usage(); + System.exit(exitValue); + } + + private static class Arguments { + public enum Format { AVRO, JSON, DELIMITED } + + public final boolean help; + public final String bootstrapServer; + public final InputStream schemaFile; + public final Format format; + public final String topicName; + public final String keyName; + public final int iterations; + public final long maxInterval; + + public Arguments( + boolean help, + String bootstrapServer, + InputStream schemaFile, + Format format, + String topicName, + String keyName, + int iterations, + long maxInterval + ) { + this.help = help; + this.bootstrapServer = bootstrapServer; + this.schemaFile = schemaFile; + this.format = format; + this.topicName = topicName; + this.keyName = keyName; + this.iterations = iterations; + this.maxInterval = maxInterval; + } + + public static class ArgumentParseException extends RuntimeException { + public ArgumentParseException(String message) { + super(message); + } + } + + public static class Builder { + private Quickstart quickstart; + + private boolean help; + private String bootstrapServer; + private InputStream schemaFile; + private Format format; + private String topicName; + private String keyName; + private int iterations; + private long maxInterval; + + public Builder() { + quickstart = null; + help = false; + bootstrapServer = 
"localhost:9092"; + schemaFile = null; + format = null; + topicName = null; + keyName = null; + iterations = 1000000; + maxInterval = -1; + } + + private enum Quickstart { + CLICKSTREAM_CODES("clickstream_codes_schema.avro", "clickstream", "code"), + CLICKSTREAM("clickstream_schema.avro", "clickstream", "ip"), + CLICKSTREAM_USERS("clickstream_users_schema.avro", "webusers", "user_id"), + ORDERS("orders_schema.avro", "orders", "orderid"), + USERS("users_schema.avro", "users", "userid"), + PAGEVIEWS("pageviews_schema.avro", "pageviews", "viewtime"); + + private final String schemaFileName; + private final String rootTopicName; + private final String keyName; + + Quickstart(String schemaFileName, String rootTopicName, String keyName) { + this.schemaFileName = schemaFileName; + this.rootTopicName = rootTopicName; + this.keyName = keyName; + } + + public InputStream getSchemaFile() { + return getClass().getClassLoader().getResourceAsStream(schemaFileName); + } + + public String getTopicName(Format format) { + return String.format("%s_kafka_topic_%s", rootTopicName, format.name().toLowerCase()); + } + + public String getKeyName() { + return keyName; + } + + public Format getFormat() { + return Format.JSON; + } + + } + + public Arguments build() { + if (help) { + return new Arguments(true, null, null, null, null, null, 0, -1); + } + + if (quickstart != null) { + schemaFile = Optional.ofNullable(schemaFile).orElse(quickstart.getSchemaFile()); + format = Optional.ofNullable(format).orElse(quickstart.getFormat()); + topicName = Optional.ofNullable(topicName).orElse(quickstart.getTopicName(format)); + keyName = Optional.ofNullable(keyName).orElse(quickstart.getKeyName()); + } + + try { + Objects.requireNonNull(schemaFile, "Schema file not provided"); + Objects.requireNonNull(format, "Message format not provided"); + Objects.requireNonNull(topicName, "Kafka topic name not provided"); + Objects.requireNonNull(keyName, "Name of key column not provided"); + } catch (NullPointerException exception) { + throw new ArgumentParseException(exception.getMessage()); + } + return new Arguments(help, bootstrapServer, schemaFile, format, topicName, keyName, + iterations, maxInterval); + } + + public Builder parseArgs(String[] args) throws IOException { + for (String arg : args) { + parseArg(arg); + } + return this; + } + + public Builder parseArg(String arg) throws IOException { + + if ("help".equals(arg)) { + help = true; + return this; + } + + String[] splitOnEquals = arg.split("="); + if (splitOnEquals.length != 2) { + throw new ArgumentParseException(String.format( + "Invalid argument format in '%s'; expected =", + arg + )); + } + + String argName = splitOnEquals[0].trim(); + String argValue = splitOnEquals[1].trim(); + + if (argName.isEmpty()) { + throw new ArgumentParseException(String.format( + "Empty argument name in %s", + arg + )); + } + + if (argValue.isEmpty()) { + throw new ArgumentParseException(String.format( + "Empty argument value in '%s'", + arg + )); + } + + switch (argName) { + case "quickstart": + try { + quickstart = Quickstart.valueOf(argValue.toUpperCase()); + } catch (IllegalArgumentException iae) { + throw new ArgumentParseException(String.format( + "Invalid quickstart in '%s'; was expecting one of " + + Arrays.toString(Quickstart.values()) + + " (case-insensitive)", + argValue + )); + } + break; + case "bootstrap-server": + bootstrapServer = argValue; + break; + case "schema": + schemaFile = new FileInputStream(argValue); + break; + case "format": + format = parseFormat(argValue); + 
+            break;
+          case "topic":
+            topicName = argValue;
+            break;
+          case "key":
+            keyName = argValue;
+            break;
+          case "iterations":
+            iterations = parseIterations(argValue);
+            break;
+          case "maxInterval":
+            maxInterval = parseMaxInterval(argValue);
+            break;
+          default:
+            throw new ArgumentParseException(String.format(
+                "Unknown argument name in '%s'",
+                argName
+            ));
+        }
+        return this;
+      }
+
+      private Format parseFormat(String formatString) {
+        try {
+          return Format.valueOf(formatString.toUpperCase());
+        } catch (IllegalArgumentException exception) {
+          throw new ArgumentParseException(String.format(
+              "Invalid format in '%s'; was expecting one of AVRO, JSON, or DELIMITED (case-insensitive)",
+              formatString
+          ));
+        }
+      }
+
+      private int parseIterations(String iterationsString) {
+        try {
+          int result = Integer.valueOf(iterationsString, 10);
+          if (result <= 0) {
+            throw new ArgumentParseException(String.format(
+                "Invalid number of iterations in '%d'; must be a positive number",
+                result
+            ));
+          }
+          return result;
+        } catch (NumberFormatException exception) {
+          throw new ArgumentParseException(String.format(
+              "Invalid number of iterations in '%s'; must be a valid base 10 integer",
+              iterationsString
+          ));
+        }
+      }
+
+      private long parseMaxInterval(String maxIntervalString) {
+        try {
+          long result = Long.valueOf(maxIntervalString, 10);
+          if (result <= 0) {
+            throw new ArgumentParseException(String.format(
+                "Invalid maxInterval in '%d'; must be a positive number",
+                result
+            ));
+          }
+          return result;
+        } catch (NumberFormatException exception) {
+          throw new ArgumentParseException(String.format(
+              "Invalid maxInterval in '%s'; must be a valid base 10 long",
+              maxIntervalString
+          ));
+        }
+      }
+    }
+  }
+}
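For orientation, the name=value parsing above means a quickstart run needs no explicit schema, topic, or key. A small driver sketch (the class name, iteration count, and interval are illustrative, and it assumes a broker on localhost:9092): with quickstart=orders and no explicit topic, the topic resolves via Quickstart.getTopicName to orders_kafka_topic_json, since the quickstart default format is JSON.

    package io.confluent.ksql.datagen;

    public class DataGenSmokeRun {
      public static void main(String[] args) throws Exception {
        // Illustrative invocation; iterations and maxInterval kept small for a smoke test.
        DataGen.main(new String[]{
            "quickstart=orders",  // pulls orders_schema.avro from the classpath
            "iterations=100",
            "maxInterval=50"      // at most 50 ms between generated rows
        });
      }
    }

diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGenProducer.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGenProducer.java
new file mode 100644
index 000000000000..ad37c69517bd
--- /dev/null
+++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGenProducer.java
@@ -0,0 +1,195 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.datagen;
+
+import io.confluent.avro.random.generator.Generator;
+import io.confluent.connect.avro.AvroData;
+import io.confluent.ksql.physical.GenericRow;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+
+import java.text.SimpleDateFormat;
+import java.util.*;
+
+public abstract class DataGenProducer {
+
+  // Default maximum of 500 ms between messages.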
+  public static final long INTER_MESSAGE_MAX_INTERVAL = 500;
+
+  public void populateTopic(
+      Properties props,
+      Generator generator,
+      String kafkaTopicName,
+      String key,
+      int messageCount,
+      long maxInterval
+  ) {
+    if (maxInterval < 0) {
+      maxInterval = INTER_MESSAGE_MAX_INTERVAL;
+    }
+
+    Schema avroSchema = generator.schema();
+    org.apache.kafka.connect.data.Schema kafkaSchema = new AvroData(1).toConnectSchema(avroSchema);
+
+    Serializer<GenericRow> serializer = getSerializer(avroSchema, kafkaSchema, kafkaTopicName);
+
+    final KafkaProducer<String, GenericRow> producer =
+        new KafkaProducer<>(props, new StringSerializer(), serializer);
+
+    SessionManager sessionManager = new SessionManager();
+
+    for (int i = 0; i < messageCount; i++) {
+      Object generatedObject = generator.generate();
+
+      if (!(generatedObject instanceof GenericRecord)) {
+        throw new RuntimeException(String.format(
+            "Expected Avro Random Generator to return instance of GenericRecord, found %s instead",
+            generatedObject.getClass().getName()
+        ));
+      }
+      GenericRecord randomAvroMessage = (GenericRecord) generatedObject;
+
+      List<Object> genericRowValues = new ArrayList<>();
+
+      SimpleDateFormat timeformatter = null;
+
+      // Populate the record entries
+      for (Schema.Field field : avroSchema.getFields()) {
+
+        String isSession = field.schema().getProp("session");
+        String timeFormatFromLong = field.schema().getProp("format_as_time");
+
+        if (isSession != null) {
+          String currentValue = (String) randomAvroMessage.get(field.name());
+          String newCurrentValue = handleSessionisationOfValue(sessionManager, currentValue);
+          genericRowValues.add(newCurrentValue);
+        } else if (timeFormatFromLong != null) {
+          Date date = new Date(System.currentTimeMillis());
+          if (timeFormatFromLong.equals("unix_long")) {
+            genericRowValues.add(date.getTime());
+          } else {
+            if (timeformatter == null) {
+              timeformatter = new SimpleDateFormat(timeFormatFromLong);
+            }
+            genericRowValues.add(timeformatter.format(date));
+          }
+        } else {
+          genericRowValues.add(randomAvroMessage.get(field.name()));
+        }
+      }
+
+      GenericRow genericRow = new GenericRow(genericRowValues);
+
+      String keyString = randomAvroMessage.get(key).toString();
+
+      ProducerRecord<String, GenericRow> producerRecord =
+          new ProducerRecord<>(kafkaTopicName, keyString, genericRow);
+      producer.send(producerRecord);
+      System.err.println(keyString + " --> (" + genericRow + ")");
+
+      try {
+        Thread.sleep((long) (maxInterval * Math.random()));
+      } catch (InterruptedException e) {
+        // Ignore the exception.
+      }
+    }
+    producer.flush();
+    producer.close();
+  }
+
+  // Superset of all session tokens seen so far.
+  Set<String> allTokens = new HashSet<>();
+
+  /**
+   * If the sessionId is new, create a session.
+   * If the sessionId is active, return the value.
+   * If the sessionId has expired, use a known token that is not expired.
+   * @param sessionManager the session manager
+   * @param currentValue the generated session token
+   * @return the session token to use for this record
+   */
+  private String handleSessionisationOfValue(SessionManager sessionManager, String currentValue) {
+
+    allTokens.add(currentValue);
+
+    // Handle known sessions (expiring them as a side effect when needed).
+    if (sessionManager.isActive(currentValue)) {
+      if (sessionManager.isExpired(currentValue)) {
+        sessionManager.isActiveAndExpire(currentValue);
+      }
+      return currentValue;
+    }
+
+    // If the session count is maxed out, reuse an active session token.
+    if (sessionManager.getActiveSessionCount() > sessionManager.getMaxSessions()) {
+      return sessionManager.getRandomActiveToken();
+    }
+
+    // Force expiring tokens to expire.
+    String expired = sessionManager.getActiveSessionThatHasExpired();
+    if (expired != null) {
+      return expired;
+    }
+
+    // Use accumulated session tokens, recycle old tokens, or give up.
+    String value = null;
+    for (String token : allTokens) {
+      if (value == null) {
+        if (!sessionManager.isActive(token) && !sessionManager.isExpired(token)) {
+          value = token;
+        }
+      }
+    }
+
+    if (value != null) {
+      sessionManager.newSession(value);
+    } else {
+      value = sessionManager.recycleOldestExpired();
+      if (value == null) {
+        throw new RuntimeException("Ran out of session tokens to reuse - increase the session "
+            + "duration (300s), reduce the number of sessions (5), or add more tokens to the "
+            + "Avro template");
+      }
+      sessionManager.newSession(value);
+      return value;
+    }
+    return currentValue;
+  }
+
+  private String getRandomToken(Set<String> collected) {
+    if (collected.size() == 0) {
+      return null;
+    }
+    List<String> values = new ArrayList<>(collected);
+    int index = (int) (Math.random() * values.size());
+    String value = values.remove(index);
+    collected.remove(value);
+    return value;
+  }
+
+  protected abstract Serializer<GenericRow> getSerializer(
+      Schema avroSchema,
+      org.apache.kafka.connect.data.Schema kafkaSchema,
+      String topicName
+  );
+
+}
\ No newline at end of file
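The populateTopic(...) contract above is what each concrete producer inherits. A minimal driver might look like the following sketch (the schema file path, topic, key column, and counts are illustrative; it assumes a broker on localhost:9092 and a local copy of orders_schema.avro):

    package io.confluent.ksql.datagen;

    import io.confluent.avro.random.generator.Generator;

    import java.io.FileInputStream;
    import java.util.Properties;
    import java.util.Random;

    public class PopulateTopicSketch {
      public static void main(String[] args) throws Exception {
        Generator generator = new Generator(new FileInputStream("orders_schema.avro"), new Random());

        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");

        // 1,000 rows, keyed by the orderid column, at most 100 ms apart.
        new DelimitedProducer().populateTopic(props, generator, "orders_csv", "orderid", 1000, 100);
      }
    }

diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/DelimitedConsumer.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DelimitedConsumer.java
new file mode 100644
index 000000000000..676d3ea02964
--- /dev/null
+++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DelimitedConsumer.java
@@ -0,0 +1,82 @@
+/**
+ * Copyright 2017 Confluent Inc.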
+ **/
+package io.confluent.ksql.datagen;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.serde.delimited.KsqlDelimitedDeserializer;
+import io.confluent.ksql.serde.delimited.KsqlDelimitedSerializer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serde;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.kstream.KStream;
+import org.apache.kafka.streams.kstream.KStreamBuilder;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Created by hojjat on 1/3/17.
+ */
+public class DelimitedConsumer {
+
+  static Serde<GenericRow> genericRowSerde = null;
+
+  private static Serde<GenericRow> getGenericRowSerde() {
+    if (genericRowSerde == null) {
+      Map<String, Object> serdeProps = new HashMap<>();
+
+      final Serializer<GenericRow> genericRowSerializer = new KsqlDelimitedSerializer();
+      genericRowSerializer.configure(serdeProps, false);
+
+      final Deserializer<GenericRow> genericRowDeserializer = new KsqlDelimitedDeserializer(null);
+      genericRowDeserializer.configure(serdeProps, false);
+
+      genericRowSerde = Serdes.serdeFrom(genericRowSerializer, genericRowDeserializer);
+    }
+    return genericRowSerde;
+  }
+
+  public void printGenericRowTopic(String topicName) {
+
+    Properties props = new Properties();
+    props.put(StreamsConfig.APPLICATION_ID_CONFIG,
+        "StreamExampleGenericRowProcessor-" + System.currentTimeMillis());
+    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+
+    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 0);
+    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
+
+    KStreamBuilder builder = new KStreamBuilder();
+
+    KStream<String, GenericRow> source = builder.stream(Serdes.String(), getGenericRowSerde(), topicName);
+
+    source.print();
+
+    KafkaStreams streams = new KafkaStreams(builder, props);
+    streams.start();
+
+    // usually the stream application would be running forever,
+    // in this example we just let it run for some time and stop since the input data is finite.
+    try {
+      Thread.sleep(10000L);
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+
+    streams.close();
+    streams.cleanUp();
+  }
+
+  public static void main(String[] args) {
+    new DelimitedConsumer().printGenericRowTopic("ENRICHEDFEMALE_CSV");
+  }
+}
diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/DelimitedProducer.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DelimitedProducer.java
new file mode 100644
index 000000000000..56082f077348
--- /dev/null
+++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DelimitedProducer.java
@@ -0,0 +1,21 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+package io.confluent.ksql.datagen;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.serde.delimited.KsqlDelimitedSerializer;
+import org.apache.avro.Schema;
+import org.apache.kafka.common.serialization.Serializer;
+
+public class DelimitedProducer extends DataGenProducer {
+
+  @Override
+  protected Serializer<GenericRow> getSerializer(
+      Schema avroSchema,
+      org.apache.kafka.connect.data.Schema kafkaSchema,
+      String topicName
+  ) {
+    return new KsqlDelimitedSerializer();
+  }
+}
diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/JsonConsumer.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/JsonConsumer.java
new file mode 100644
index 000000000000..b426ff8b0aac
--- /dev/null
+++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/JsonConsumer.java
@@ -0,0 +1,249 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+package io.confluent.ksql.datagen;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.serde.json.KsqlJsonDeserializer;
+import io.confluent.ksql.serde.json.KsqlJsonSerializer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serde;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.kstream.Aggregator;
+import org.apache.kafka.streams.kstream.Initializer;
+import org.apache.kafka.streams.kstream.KStream;
+import org.apache.kafka.streams.kstream.KStreamBuilder;
+import org.apache.kafka.streams.kstream.KTable;
+import org.apache.kafka.streams.kstream.KeyValueMapper;
+import org.apache.kafka.streams.kstream.Predicate;
+import org.apache.kafka.streams.kstream.TimeWindows;
+import org.apache.kafka.streams.kstream.ValueMapper;
+import org.apache.kafka.streams.kstream.Windowed;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+
+public class JsonConsumer {
+
+  static Serde<GenericRow> genericRowSerde = null;
+
+  public void printGenericRowTopic(String topicName) {
+
+    Properties props = new Properties();
+    props.put(StreamsConfig.APPLICATION_ID_CONFIG,
+        "StreamExampleGenericRowProcessor-" + System.currentTimeMillis());
+    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+
+    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 100);
+
+    KStreamBuilder builder = new KStreamBuilder();
+
+    KTable<String, GenericRow> source =
+        builder.table(Serdes.String(), getGenericRowSerde(), topicName, "users");
+
+    source.mapValues(new ValueMapper<GenericRow, GenericRow>() {
+      @Override
+      public GenericRow apply(GenericRow genericRow) {
+        System.out.println(genericRow.toString());
+        return genericRow;
+      }
+    });
+
+    KafkaStreams streams = new KafkaStreams(builder, props);
+    streams.start();
+
+    // usually the stream application would be running forever,
+    // in this example we just let it run for some time and stop since the input data is finite.
+    try {
+      Thread.sleep(5000L);
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+
+    streams.close();
+    streams.cleanUp();
+  }
+
+  public void processGenericRow() {
+
+    Properties props = new Properties();
+    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "StreamExample1-GenericRow-Processor");
+    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+
+    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+
+    KStreamBuilder builder = new KStreamBuilder();
+
+    KStream<String, GenericRow> source =
+        builder.stream(Serdes.String(), genericRowSerde, "StreamExample1-GenericRow-order");
+
+    KStream<String, GenericRow> orderFilter = orderFilter(source);
+
+    KStream<String, GenericRow> orderProject = orderProject(orderFilter);
+
+    KTable<Windowed<String>, Long> orderAggregate = orderUnitsPer10Seconds(orderProject);
+
+    orderAggregate.print();
+
+    KafkaStreams streams = new KafkaStreams(builder, props);
+    streams.start();
+
+    // usually the stream application would be running forever,
+    // in this example we just let it run for some time and stop since the input data is finite.
+    try {
+      Thread.sleep(1000L);
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+
+    streams.close();
+    streams.cleanUp();
+  }
+
+  private KStream<String, GenericRow> orderFilter(KStream<String, GenericRow> orderStream) {
+
+    KStream<String, GenericRow> orderFilter =
+        orderStream.filter(new Predicate<String, GenericRow>() {
+          @Override
+          public boolean test(String key, GenericRow value) {
+            int units = (Integer) value.getColumns().get(3);
+            return units > 5;
+          }
+        });
+    return orderFilter;
+  }
+
+  private KStream<String, GenericRow> orderProject(KStream<String, GenericRow> orderStream) {
+
+    KStream<String, GenericRow> orderProject =
+        orderStream.map(new KeyValueMapper<String, GenericRow, KeyValue<String, GenericRow>>() {
+          @Override
+          public KeyValue<String, GenericRow> apply(String key, GenericRow value) {
+            List<Object> newColumns =
+                Arrays.asList(value.getColumns().get(0), value.getColumns().get(1),
+                    value.getColumns().get(3));
+            GenericRow genericRow = new GenericRow(newColumns);
+            return new KeyValue<>(key, genericRow);
+          }
+        });
+    return orderProject;
+  }
+
+  private KTable<Windowed<String>, Long> orderUnitsPer10Seconds(
+      KStream<String, GenericRow> sourceOrderStream) {
+
+    long windowSizeMs = 10000L;
+    TimeWindows timeWindows = TimeWindows.of(windowSizeMs);
+    KTable<Windowed<String>, Long> groupedStream = sourceOrderStream
+        .selectKey(new KeyValueMapper<String, GenericRow, String>() {
+          @Override
+          public String apply(String key, GenericRow value) {
+            String newKey = value.getColumns().get(1).toString();
+            return newKey;
+          }
+        })
+        .groupByKey(Serdes.String(), getGenericRowSerde())
+        .aggregate(
+            new Initializer<Long>() {
+              @Override
+              public Long apply() {
+                return 0L;
+              }
+            },
+            new Aggregator<String, GenericRow, Long>() {
+              @Override
+              public Long apply(String aggKey, GenericRow value, Long aggregate) {
+                long val = Long.valueOf(value.getColumns().get(2).toString());
+                return aggregate + val;
+              }
+            },
+            timeWindows,
+            Serdes.Long(),
+            "SalesUnits"
+        );
+
+    return groupedStream;
+  }
+
+  private static Serde<GenericRow> getGenericRowSerde() {
+    if (genericRowSerde == null) {
+      Map<String, Object> serdeProps = new HashMap<>();
+
+      final Serializer<GenericRow> genericRowSerializer = new KsqlJsonSerializer(null);
+      serdeProps.put("JsonPOJOClass", GenericRow.class);
+      genericRowSerializer.configure(serdeProps, false);
+
+      final Deserializer<GenericRow> genericRowDeserializer = new KsqlJsonDeserializer(null);
+      serdeProps.put("JsonPOJOClass", GenericRow.class);
+      genericRowDeserializer.configure(serdeProps, false);
+
+      genericRowSerde = Serdes.serdeFrom(genericRowSerializer, genericRowDeserializer);
+    }
+    return genericRowSerde;
+  }
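Because the anonymous-class plumbing above can obscure the shape of the aggregation, here is the same windowed sum written with Java 8 lambdas. This is a sketch against the same Streams API the file already uses (the method and store names with a "Lambda" suffix are invented for the example; behavior is intended to be identical):

    // Drop-in equivalent of orderUnitsPer10Seconds(...) using lambdas; assumes the
    // surrounding JsonConsumer class (getGenericRowSerde() and the same imports).
    private KTable<Windowed<String>, Long> orderUnitsPer10SecondsLambda(
        KStream<String, GenericRow> sourceOrderStream) {
      return sourceOrderStream
          .selectKey((key, value) -> value.getColumns().get(1).toString()) // re-key by item id
          .groupByKey(Serdes.String(), getGenericRowSerde())
          .aggregate(
              () -> 0L,                                                    // initial sum
              (aggKey, value, aggregate) ->
                  aggregate + Long.valueOf(value.getColumns().get(2).toString()),
              TimeWindows.of(10000L),                                      // 10-second tumbling window
              Serdes.Long(),
              "SalesUnitsLambda"                                           // state store name
          );
    }

+
+  public void joinTest() {
+    Properties props = new Properties();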
+    props.put(StreamsConfig.APPLICATION_ID_CONFIG,
+        "StreamExampleGenericRowProcessor-" + System.currentTimeMillis());
+    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+
+    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 100);
+
+    KStreamBuilder builder = new KStreamBuilder();
+
+    // Note: the pageview stream is built here but not joined yet; this scaffold only
+    // prints the user table.
+    KStream<String, GenericRow> pageviewStream =
+        builder.stream(Serdes.String(), getGenericRowSerde(), "streams-pageview-input");
+
+    KTable<String, GenericRow> usersTable =
+        builder.table(Serdes.String(), getGenericRowSerde(), "streams-userprofile-input", "users");
+
+    usersTable.mapValues(new ValueMapper<GenericRow, GenericRow>() {
+      @Override
+      public GenericRow apply(GenericRow genericRow) {
+        System.out.println(genericRow.toString());
+        return genericRow;
+      }
+    });
+
+    KafkaStreams streams = new KafkaStreams(builder, props);
+    streams.start();
+
+    // usually the stream application would be running forever,
+    // in this example we just let it run for some time and stop since the input data is finite.
+    try {
+      Thread.sleep(5000L);
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+
+    streams.close();
+    streams.cleanUp();
+  }
+
+  public static void main(String[] args) {
+    new JsonConsumer().printGenericRowTopic("ENRICHEDFEMALE_CSV");
+  }
+}
diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/JsonProducer.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/JsonProducer.java
new file mode 100644
index 000000000000..50fa5cee7767
--- /dev/null
+++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/JsonProducer.java
@@ -0,0 +1,21 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+package io.confluent.ksql.datagen;
+
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.serde.json.KsqlJsonSerializer;
+import org.apache.avro.Schema;
+import org.apache.kafka.common.serialization.Serializer;
+
+public class JsonProducer extends DataGenProducer {
+
+  @Override
+  protected Serializer<GenericRow> getSerializer(
+      Schema avroSchema,
+      org.apache.kafka.connect.data.Schema kafkaSchema,
+      String topicName
+  ) {
+    return new KsqlJsonSerializer(kafkaSchema);
+  }
+}
diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/SessionManager.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/SessionManager.java
new file mode 100644
index 000000000000..6d22f206771f
--- /dev/null
+++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/SessionManager.java
@@ -0,0 +1,139 @@
+package io.confluent.ksql.datagen;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+
+public class SessionManager {
+
+  private int maxSessionDurationSeconds = 30;
+  private int maxSessions = 5;
+
+  public void setMaxSessionDurationSeconds(int maxSessionDurationSeconds) {
+    this.maxSessionDurationSeconds = maxSessionDurationSeconds;
+  }
+
+  public void setMaxSessions(int maxSessions) {
+    this.maxSessions = maxSessions;
+  }
+
+  public boolean isActive(String sessionId) {
+    return activeSessions.containsKey(sessionId);
+  }
+
+  public boolean isExpired(String sessionId) {
+    if (activeSessions.containsKey(sessionId)) {
+      SessionObject sessionObject = activeSessions.get(sessionId);
+      return sessionObject.isExpired();
+    }
+    return expiredSessions.containsKey(sessionId);
+  }
+
+  public boolean isActiveAndExpire(String sessionId) {
+    boolean b = isActive(sessionId);
+    if (b) {
+      SessionObject sessionObject = activeSessions.get(sessionId);
+      if (sessionObject.isExpired()) {
+        System.out.println("***Expired:" + sessionId);
+        SessionObject removed = activeSessions.remove(sessionId);
+        expiredSessions.put(sessionId, removed);
+        return false;
+      }
+    }
+    return b;
+  }
+
+  public void newSession(String sessionToken) {
+    if (activeSessions.containsKey(sessionToken)) {
+      throw new RuntimeException("Session " + sessionToken + " already exists");
+    }
+    activeSessions.putIfAbsent(sessionToken, new SessionObject(maxSessionDurationSeconds));
+  }
+
+  public boolean isExpiredSession(String sessionId) {
+    return expiredSessions.containsKey(sessionId);
+  }
+
+  public String recycleOldestExpired() {
+    Map.Entry<String, SessionObject> oldest = null;
+    for (Map.Entry<String, SessionObject> entry : expiredSessions.entrySet()) {
+      if (oldest == null || (entry.getValue().created < oldest.getValue().created)) {
+        oldest = entry;
+      }
+    }
+    if (oldest != null) {
+      expiredSessions.remove(oldest.getKey());
+      return oldest.getKey();
+    }
+    return null;
+  }
+
+  public String getRandomActiveToken() {
+    int randomIndex = (int) (Math.random() * activeSessions.size());
+    return new ArrayList<>(activeSessions.keySet()).get(randomIndex);
+  }
+
+  public String getActiveSessionThatHasExpired() {
+    String expiredToken = null;
+    for (String s : activeSessions.keySet()) {
+      if (activeSessions.get(s).isExpired()) {
+        expiredToken = s;
+      }
+    }
+    if (expiredToken != null) {
+      expiredSessions.put(expiredToken, activeSessions.remove(expiredToken));
+    }
+    return expiredToken;
+  }
+
+  public String getToken(String s) {
+    if (activeSessions.containsKey(s)) {
+      return s;
+    }
+
+    // Maxed out - reuse a random active key.
+    if (activeSessions.size() == maxSessions) {
+      int randomIndex = (int) (Math.random() * activeSessions.size());
+      return new ArrayList<>(activeSessions.keySet()).get(randomIndex);
+    }
+
+    // We have a new sessionId; if it is expired then we allow reuse.
+    if (expiredSessions.containsKey(s)) {
+      expiredSessions.remove(s);
+      return s;
+    }
+
+    return s;
+  }
+
+  public int getActiveSessionCount() {
+    return activeSessions.size();
+  }
+
+  public String randomActiveSession() {
+    if (activeSessions.size() == 0) {
+      return null;
+    }
+    return activeSessions.keySet().iterator().next();
+  }
+
+  public int getMaxSessions() {
+    return maxSessions;
+  }
+
+  public static class SessionObject {
+    public SessionObject(int duration) {
+      this.sessionDurationSecs = duration;
+    }
+
+    long created = System.currentTimeMillis();
+    private long sessionDurationSecs = 300;
+
+    public boolean isExpired() {
+      return (System.currentTimeMillis() - created) / 1000 > sessionDurationSecs;
+    }
+
+    @Override
+    public String toString() {
+      return "Session:" + new Date(created).toString();
+    }
+  }
+
+  Map<String, SessionObject> expiredSessions = new HashMap<>();
+  Map<String, SessionObject> activeSessions = new HashMap<>();
+
+}
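A compact illustration of the lifecycle this class implements (a sketch; the class name, one-second duration, and token name are arbitrary, and the unit tests later in this patch exercise the same paths more thoroughly):

    package io.confluent.ksql.datagen;

    public class SessionManagerSketch {
      public static void main(String[] args) throws InterruptedException {
        SessionManager sm = new SessionManager();
        sm.setMaxSessionDurationSeconds(1);
        sm.setMaxSessions(5);

        sm.newSession("token-a");                            // token-a is now active
        System.out.println(sm.isActiveAndExpire("token-a")); // true: active, not yet expired

        Thread.sleep(1500);                                  // outlive the 1 s session duration
        System.out.println(sm.isActiveAndExpire("token-a")); // false: moved to the expired set
        System.out.println(sm.recycleOldestExpired());       // "token-a" becomes reusable again
      }
    }

diff --git a/ksql-examples/src/main/java/io/confluent/ksql/embedded/EmbeddedKsql.java b/ksql-examples/src/main/java/io/confluent/ksql/embedded/EmbeddedKsql.java
new file mode 100644
index 000000000000..cd3ec25135fc
--- /dev/null
+++ b/ksql-examples/src/main/java/io/confluent/ksql/embedded/EmbeddedKsql.java
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2017 Confluent Inc.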
+ **/
+
+package io.confluent.ksql.embedded;
+
+import io.confluent.ksql.KsqlContext;
+
+public class EmbeddedKsql {
+
+  public static void main(String[] args) throws Exception {
+
+    KsqlContext ksqlContext = new KsqlContext();
+
+    ksqlContext.sql("REGISTER TOPIC orders_topic WITH (format = 'json', "
+        + "kafka_topic='orders_topic_json');");
+
+    ksqlContext.sql("CREATE STREAM orders (ordertime bigint, orderid bigint, itemid varchar, "
+        + "orderunits double, arraycol array<double>, mapcol map<varchar, double>) "
+        + "WITH (topicname = 'orders_topic' , key='orderid');\n");
+    ksqlContext.sql("CREATE STREAM BIGORDERS AS SELECT * FROM ORDERS WHERE ORDERUNITS > 5;");
+    ksqlContext.sql("SELECT * FROM ORDERS;");
+    ksqlContext.sql("CREATE TABLE ORDERSUMS AS select itemid, sum(orderunits) from orders window "
+        + "TUMBLING ( size 30 second) group by itemid;");
+
+    System.out.println("Queries are running!");
+
+  }
+
+}
diff --git a/ksql-examples/src/main/resources/SampleQueries.sql b/ksql-examples/src/main/resources/SampleQueries.sql
new file mode 100644
index 000000000000..3ce2b90c63a6
--- /dev/null
+++ b/ksql-examples/src/main/resources/SampleQueries.sql
@@ -0,0 +1,26 @@
+REGISTER TOPIC users_topic WITH (value_format = 'json', kafka_topic='user_topic_json');
+REGISTER TOPIC pageview_topic WITH (value_format = 'json', kafka_topic='pageview_topic_json');
+
+CREATE STREAM pageview (viewtime bigint, pageid varchar, userid varchar) WITH (registered_topic = 'pageview_topic');
+CREATE TABLE users (registertime bigint, userid varchar, regionid varchar, gender varchar) WITH (registered_topic = 'users_topic');
+
+
+-- Enrich the pageview stream
+CREATE STREAM enrichedpv AS SELECT users.userid AS userid, pageid, regionid, gender FROM pageview LEFT JOIN users ON pageview.userid = users.userid;
+
+-- Find all the pageviews by female users
+CREATE STREAM enrichedpv_female AS SELECT users.userid AS userid, pageid, regionid, gender FROM pageview LEFT JOIN users ON pageview.userid = users.userid WHERE gender = 'FEMALE';
+
+-- Find the pageviews from regions with ids ending in _8 or _9 among the female pageviews
+CREATE STREAM enrichedpv_female_r8 AS SELECT * FROM enrichedpv_female WHERE regionid LIKE '%_8' OR regionid LIKE '%_9';
+
+-- Number of views for each page for a tumbling window of 5 seconds
+CREATE TABLE pvcount AS SELECT pageid, count(*) from enrichedpv window tumbling (size 5 second) group by pageid;
+
+-- Number of views for each page for a tumbling window of 5 minutes
+CREATE TABLE pvcount_5min AS SELECT pageid, count(*) from enrichedpv window tumbling (size 5 minute) group by pageid;
+
+
+-- Number of views for each region and gender combination for a tumbling window of 15 seconds,
+-- when the view count is greater than 5
+CREATE TABLE pvcount_gender_region AS SELECT gender, regionid , count(*) from enrichedpv window tumbling (size 15 second) group by gender, regionid having count(*) > 5;
\ No newline at end of file
diff --git a/ksql-examples/src/main/resources/clickstream_codes_schema.avro b/ksql-examples/src/main/resources/clickstream_codes_schema.avro
new file mode 100644
index 000000000000..197daedad569
--- /dev/null
+++ b/ksql-examples/src/main/resources/clickstream_codes_schema.avro
@@ -0,0 +1,43 @@
+{
+  "namespace": "clickstream",
+  "name": "codes",
+  "type": "record",
+  "fields": [
+    {
+      "name": "code",
+      "type": "int"
+    },
+    {
+      "name": "definition",
+      "type": "string"
+    }
+  ],
+  "arg.properties": {
+    "options": [
+      {
+        "code": 200,
+        "definition": "Successful"
+      },
+      {
+        "code": 302,
+        "definition": "Redirect"
+      },
{ + "code": 404, + "definition": "Page not found" + }, + { + "code": 405, + "definition": "Method not allowed" + }, + { + "code": 406, + "definition": "Not acceptable" + }, + { + "code": 407, + "definition": "Proxy authentication required" + } + ] + } +} diff --git a/ksql-examples/src/main/resources/clickstream_schema.avro b/ksql-examples/src/main/resources/clickstream_schema.avro new file mode 100644 index 000000000000..2d180fe6e842 --- /dev/null +++ b/ksql-examples/src/main/resources/clickstream_schema.avro @@ -0,0 +1,159 @@ +{ + "namespace": "clickstream", + "name": "events", + "type": "record", + "fields": [ + {"name": "ip", "type": { + "session" : "true", + "type": "string", + "arg.properties": { + "options": [ + "111.152.45.45", + "111.203.236.146", + "111.168.57.122", + "111.249.79.93", + "111.168.57.122", + "111.90.225.227", + "111.173.165.103", + "111.145.8.144", + "111.245.174.248", + + "222.152.45.45", + "222.203.236.146", + "222.168.57.122", + "222.249.79.93", + "222.168.57.122", + "222.90.225.227", + "222.173.165.103", + "222.145.8.144", + "222.245.174.248", + + "122.152.45.245", + "122.203.236.246", + "122.168.57.222", + "122.249.79.233", + "122.168.57.222", + "122.90.225.227", + "122.173.165.203", + "122.145.8.244", + "122.245.174.248", + + "233.152.245.45", + "233.203.236.146", + "233.168.257.122", + "233.249.279.93", + "233.168.257.122", + "233.90.225.227", + "233.173.215.103", + "233.145.28.144", + "233.245.174.248" + + + ] + } + }}, +// {"name": "userid", "type": { +// "type": "string", +// "arg.properties": { +// "options": [ +// "-" +// ] +// }}}, + {"name": "userid", "type": { + "type": "int", + "arg.properties": { + "range": { + "min": -1, + "max": 20 + } + } + }}, + {"name": "remote_user", "type": { + "type": "string", + "arg.properties": { + "options": [ + "-" + ] + }}}, + + + {"name": "time", "type": { + "type": "long", + "format_as_time" : "dd/MMM/yyyy:HH:mm:ss Z", + "arg.properties": { + "iteration": { "start": 1, "step": 10} + } + }}, + {"name": "_time", "type": { + "type": "long", + "format_as_time" : "unix_long", + "arg.properties": { + "iteration": { "start": 1, "step": 10} + } + }}, + + {"name": "request", "type": { + "type": "string", + "arg.properties": { + "options": [ + "GET /index.html HTTP/1.1", + "GET /site/user_status.html HTTP/1.1", + "GET /site/login.html HTTP/1.1", + "GET /site/user_status.html HTTP/1.1", + "GET /images/track.png HTTP/1.1", + "GET /images/logo-small.png HTTP/1.1" + ] + }}}, + //https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html + {"name": "status", "type": { + "type": "string", + "arg.properties": { + "options": [ + "200", + "302", + "404", + "405", // err codes + "406", + "407" + ] + } + }}, + + {"name": "bytes", "type": { + "type": "string", + "arg.properties": { + "options": [ + "278", + "1289", + "2048", + "4096", + "4006", + "4196", + "14096" + ] + } + }}, + + {"name": "referrer", "type": { + "type": "string", + "arg.properties": { + "options": [ + "-" + ] + } + }}, + + {"name": "agent", "type": { + "type": "string", + "arg.properties": { + "options": [ + "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)", + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36" + ] + } + }} + + + + ] +} \ No newline at end of file diff --git a/ksql-examples/src/main/resources/clickstream_users_schema.avro b/ksql-examples/src/main/resources/clickstream_users_schema.avro new file mode 100644 index 000000000000..4aad7b1a0084 --- /dev/null +++ 
b/ksql-examples/src/main/resources/clickstream_users_schema.avro
@@ -0,0 +1,85 @@
+{
+  "namespace": "clickstream",
+  "name": "users",
+  "type": "record",
+  "fields": [
+    {"name": "user_id", "type": {
+      "type": "int",
+      "arg.properties": {
+        "iteration": {
+          "start": 1
+        }
+      }
+    }},
+    {"name": "registered_at", "type": {
+      "type": "long",
+      "arg.properties": {
+        "range": {
+          "min": 1407645330000,
+          "max": 1502339792000
+        }
+      }
+    }},
+    {"name": "first_name", "type": {
+      "type": "string",
+      "arg.properties": {
+        "options": [
+          "Elwyn",
+          "Curran",
+          "Hanson",
+          "Woodrow",
+          "Ferd",
+          "Reeva",
+          "Antonio",
+          "Arlyne",
+          "Dimitri",
+          "Oriana",
+          "Abdel",
+          "Greta"
+        ]
+      }
+    }},
+    {"name": "last_name", "type": {
+      "type": "string",
+      "arg.properties": {
+        "options": [
+          "Vanyard",
+          "Vears",
+          "Garrity",
+          "Trice",
+          "Tomini",
+          "Jushcke",
+          "De Banke",
+          "Pask",
+          "Rockhill",
+          "Romagosa",
+          "Adicot",
+          "Lalonde"
+        ]
+      }
+    }},
+    {"name": "city", "type": {
+      "type": "string",
+      "arg.properties": {
+        "options": [
+          "Palo Alto",
+          "San Francisco",
+          "Raleigh",
+          "London",
+          "Frankfurt",
+          "New York"
+        ]
+      }
+    }},
+    {"name": "level", "type": {
+      "type": "string",
+      "arg.properties": {
+        "options": [
+          "Gold",
+          "Silver",
+          "Platinum"
+        ]
+      }
+    }}
+  ]
+}
\ No newline at end of file
diff --git a/ksql-examples/src/main/resources/ksql-server.properties b/ksql-examples/src/main/resources/ksql-server.properties
new file mode 100644
index 000000000000..57dfb32229eb
--- /dev/null
+++ b/ksql-examples/src/main/resources/ksql-server.properties
@@ -0,0 +1,9 @@
+# used when running bin/ksql-server
+# to submit queries
+bootstrap.servers=localhost:9092
+ksql.cluster.id=ksql_quickstart
+command.topic.suffix=commands
+num.stream.threads=4
+commit.interval.ms=2000
+cache.max.bytes.buffering=2000000
+listeners=http://localhost:8080
\ No newline at end of file
diff --git a/ksql-examples/src/main/resources/orders_schema.avro b/ksql-examples/src/main/resources/orders_schema.avro
new file mode 100644
index 000000000000..8146b73143f3
--- /dev/null
+++ b/ksql-examples/src/main/resources/orders_schema.avro
@@ -0,0 +1,39 @@
+{
+  "namespace": "ksql",
+  "name": "orders",
+  "type": "record",
+  "fields": [
+    {"name": "ordertime", "type": {
+      "type": "long",
+      "arg.properties": {
+        "range": {
+          "min": 1487715775521,
+          "max": 1519273364600
+        }
+      }
+    }},
+    {"name": "orderid", "type": {
+      "type": "int",
+      "arg.properties": {
+        "iteration": {
+          "start": 0
+        }
+      }
+    }},
+    {"name": "itemid", "type": {
+      "type": "string",
+      "arg.properties": {
+        "regex": "Item_[1-9][0-9]{0,2}"
+      }
+    }},
+    {"name": "orderunits", "type": {
+      "type": "double",
+      "arg.properties": {
+        "range": {
+          "min": 0.1,
+          "max": 10.0
+        }
+      }
+    }}
+  ]
+}
\ No newline at end of file
diff --git a/ksql-examples/src/main/resources/pageviews_schema.avro b/ksql-examples/src/main/resources/pageviews_schema.avro
new file mode 100644
index 000000000000..ec5f1031f08b
--- /dev/null
+++ b/ksql-examples/src/main/resources/pageviews_schema.avro
@@ -0,0 +1,28 @@
+{
+  "namespace": "ksql",
+  "name": "pageviews",
+  "type": "record",
+  "fields": [
+    {"name": "viewtime", "type": {
+      "type": "long",
+      "arg.properties": {
+        "range": {
+          "min": 1487715775521,
+          "max": 1519273364600
+        }
+      }
+    }},
+    {"name": "userid", "type": {
+      "type": "string",
+      "arg.properties": {
+        "regex": "User_[1-9]{0,1}"
+      }
+    }},
+    {"name": "pageid", "type": {
+      "type": "string",
+      "arg.properties": {
+        "regex": "Page_[1-9][0-9]?"
+      }
+    }}
+  ]
+}
\ No newline at end of file
diff --git a/ksql-examples/src/main/resources/users_schema.avro b/ksql-examples/src/main/resources/users_schema.avro
new file mode 100644
index 000000000000..0744ff9b5156
--- /dev/null
+++ b/ksql-examples/src/main/resources/users_schema.avro
@@ -0,0 +1,38 @@
+{
+  "namespace": "ksql",
+  "name": "users",
+  "type": "record",
+  "fields": [
+    {"name": "registertime", "type": {
+      "type": "long",
+      "arg.properties": {
+        "range": {
+          "min": 1487715775521,
+          "max": 1519273364600
+        }
+      }
+    }},
+    {"name": "userid", "type": {
+      "type": "string",
+      "arg.properties": {
+        "regex": "User_[1-9]{0,1}"
+      }
+    }},
+    {"name": "regionid", "type": {
+      "type": "string",
+      "arg.properties": {
+        "regex": "Region_[1-9]?"
+      }
+    }},
+    {"name": "gender", "type": {
+      "type": "string",
+      "arg.properties": {
+        "options": [
+          "MALE",
+          "FEMALE",
+          "OTHER"
+        ]
+      }
+    }}
+  ]
+}
\ No newline at end of file
diff --git a/ksql-examples/src/main/test/io/confluent/ksql/datagen/SessionManagerTest.java b/ksql-examples/src/main/test/io/confluent/ksql/datagen/SessionManagerTest.java
new file mode 100644
index 000000000000..f5e99e0ea567
--- /dev/null
+++ b/ksql-examples/src/main/test/io/confluent/ksql/datagen/SessionManagerTest.java
@@ -0,0 +1,137 @@
+package io.confluent.ksql.datagen;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.junit.Assert.*;
+
+public class SessionManagerTest {
+
+  @Before
+  public void setUp() throws Exception {
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void sessionShouldForceTokenReUseWhenMaxedOut() throws InterruptedException {
+
+    SessionManager sm = new SessionManager();
+
+    sm.setMaxSessionDurationSeconds(1);
+    sm.setMaxSessions(5);
+
+    Set<String> expectedSet = new HashSet<>(Arrays.asList("0", "1", "2", "3", "4"));
+
+    // Fill the active sessions.
+    for (int i = 0; i < 5; i++) {
+      String token = sm.getToken(Integer.toString(i));
+      assertTrue("Got Token:" + token, expectedSet.contains(token));
+      sm.newSession(token);
+    }
+  }
+
+  @Test
+  public void sessionShouldExpireAndReuse() throws InterruptedException {
+
+    SessionManager sm = new SessionManager();
+
+    sm.setMaxSessionDurationSeconds(1);
+    sm.setMaxSessions(5);
+
+    // Fill the active sessions.
+    for (int i = 0; i < 5; i++) {
+      sm.newSession(Integer.toString(i));
+    }
+
+    // Expire them all.
+    Thread.sleep(2 * 1000);
+
+    // Reuse the tokens.
+    for (int i = 0; i < 5; i++) {
+
+      // force expiration & check
+      boolean active = sm.isActiveAndExpire(Integer.toString(i));
+      assertFalse(active);
+
+      // want to re-use the oldest existing session if we haven't seen this one before
+      boolean isRecycled = sm.isExpiredSession(Integer.toString(i));
+      assertTrue("Should be recycled session: " + i, isRecycled);
+
+      String oldest = sm.recycleOldestExpired();
+      assertNotNull(oldest);
+
+      sm.newSession(Integer.toString(i));
+    }
+  }
+
+  @Test
+  public void isReturningOldestExpiredSession() throws InterruptedException {
+
+    SessionManager sm = new SessionManager();
+    sm.setMaxSessionDurationSeconds(1);
+    sm.newSession("1");
+    Thread.sleep(200);
+    sm.newSession("2");
+    Thread.sleep(2500);
+
+    sm.isActiveAndExpire("1");
+    sm.isActiveAndExpire("2");
+
+    assertEquals("1", sm.recycleOldestExpired());
+  }
+
+  @Test
+  public void isActiveThenAddSession() throws InterruptedException {
+
+    SessionManager sm = new SessionManager();
+    String
sessionToken = "not-active"; + assertFalse(sm.isActiveAndExpire(sessionToken)); + sm.newSession(sessionToken); + assertTrue(sm.isActiveAndExpire(sessionToken)); + + } + + @Test + public void doesSessionExpire() throws InterruptedException { + + SessionManager sm = new SessionManager(); + sm.setMaxSessionDurationSeconds(1); + String sessionToken = "active"; + sm.newSession(sessionToken); + assertTrue(sm.isActiveAndExpire(sessionToken)); + + Thread.sleep(2 * 1000); + + assertFalse(sm.isActiveAndExpire(sessionToken)); + } + + +} \ No newline at end of file diff --git a/ksql-rest-app/pom.xml b/ksql-rest-app/pom.xml new file mode 100644 index 000000000000..805c9f223032 --- /dev/null +++ b/ksql-rest-app/pom.xml @@ -0,0 +1,133 @@ + + + 4.0.0 + + + io.confluent.ksql + ksql-parent + 0.1-SNAPSHOT + + + ksql-rest-app + + + ${project.parent.basedir} + io.confluent.ksql.rest.server.KsqlRestApplication + false + ${main-class} + + + + + io.confluent.ksql + ksql-core + + + + io.confluent + kafka-json-serializer + + + + io.confluent + rest-utils + + + + com.github.rvesse + airline + + + + junit + junit + + + + org.apache.kafka + connect-json + + + + org.easymock + easymock + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + src/assembly/development.xml + src/assembly/package.xml + src/assembly/standalone.xml + + + + ${main-class} + + + false + + + + make-assembly + package + + single + + + + + + + org.codehaus.mojo + exec-maven-plugin + ${exec-maven-plugin.version} + + + create-licenses + + io.confluent.licenses.LicenseFinder + + + -i ${project.build.directory}/${project.build.finalName}-package/share/java/${artifactId} + -f + -h ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/licenses.html + -l ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/licenses + -n ${project.build.directory}/${project.build.finalName}-package/share/doc/${project.artifactId}/notices + -x licenses-${project.version}.jar + + + package + + java + + + + + true + true + + io.confluent + licenses + + + + + io.confluent + licenses + ${licenses.version} + + + + + + + diff --git a/ksql-rest-app/src/assembly/development.xml b/ksql-rest-app/src/assembly/development.xml new file mode 100644 index 000000000000..1180bf732fcf --- /dev/null +++ b/ksql-rest-app/src/assembly/development.xml @@ -0,0 +1,41 @@ + + + development + + dir + + false + + + ${project.parent.basedir} + share/doc/ksql-rest-app/ + + README* + COPYRIGHT* + + + + ${project.parent.basedir} + + + bin/* + + + + ${project.parent.basedir}/config + etc/ksql-rest-app + + * + + + + + + share/java/ksql-rest-app/ + + + diff --git a/ksql-rest-app/src/assembly/package.xml b/ksql-rest-app/src/assembly/package.xml new file mode 100644 index 000000000000..2aed09d241a1 --- /dev/null +++ b/ksql-rest-app/src/assembly/package.xml @@ -0,0 +1,54 @@ + + + package + + dir + + false + + + ${project.parent.basedir} + share/doc/ksql-rest-app/ + + version.txt + COPYRIGHT* + + + + ${project.parent.basedir} + + + bin/* + + + + ${project.parent.basedir}/config + etc/ksql-rest-app + + * + + + + + + share/java/ksql-rest-app + true + + true + + io.confluent:rest-utils + io.confluent:common-* + com.google.guava:guava + + + + diff --git a/ksql-rest-app/src/assembly/standalone.xml b/ksql-rest-app/src/assembly/standalone.xml new file mode 100644 index 000000000000..eda40e689eda --- /dev/null +++ b/ksql-rest-app/src/assembly/standalone.xml @@ -0,0 +1,45 @@ + + + standalone + + jar + + false + + + 
${project.parent.basedir} + / + + README* + COPYRIGHT* + + + + + / + true + true + runtime + + ${project.groupId}:${project.artifactId} + + + + + / + false + true + runtime + + + log4j.properties + + + + +
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/client/KsqlRestClient.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/client/KsqlRestClient.java
new file mode 100644
index 000000000000..d7c2eac5a132
--- /dev/null
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/client/KsqlRestClient.java
@@ -0,0 +1,229 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.rest.client;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.confluent.ksql.rest.entity.CommandStatus;
+import io.confluent.ksql.rest.entity.CommandStatuses;
+import io.confluent.ksql.rest.entity.ErrorMessage;
+import io.confluent.ksql.rest.entity.KsqlEntityList;
+import io.confluent.ksql.rest.entity.KsqlRequest;
+import io.confluent.ksql.rest.entity.SchemaMapper;
+import io.confluent.ksql.rest.entity.ServerInfo;
+import io.confluent.ksql.rest.entity.StreamedRow;
+import io.confluent.rest.validation.JacksonMessageBodyProvider;
+
+import javax.ws.rs.client.Client;
+import javax.ws.rs.client.ClientBuilder;
+import javax.ws.rs.client.Entity;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Scanner;
+
+public class KsqlRestClient implements Closeable, AutoCloseable {
+
+  private final Client client;
+
+  private String serverAddress;
+
+  private final Map<String, Object> localProperties;
+
+  public KsqlRestClient(String serverAddress) {
+    this(serverAddress, new HashMap<>());
+  }
+
+  public KsqlRestClient(String serverAddress, Map<String, Object> localProperties) {
+    this.serverAddress = serverAddress;
+    this.localProperties = localProperties;
+    ObjectMapper objectMapper = new SchemaMapper().registerToObjectMapper(new ObjectMapper());
+    JacksonMessageBodyProvider jsonProvider = new JacksonMessageBodyProvider(objectMapper);
+    this.client = ClientBuilder.newBuilder().register(jsonProvider).build();
+  }
+
+  public String getServerAddress() {
+    return serverAddress;
+  }
+
+  public void setServerAddress(String serverAddress) {
+    this.serverAddress = serverAddress;
+  }
+
+  public RestResponse<ServerInfo> makeRootRequest() {
+    Response response = makeGetRequest("/");
+    ServerInfo result = response.readEntity(ServerInfo.class);
+    response.close();
+    return RestResponse.successful(result);
+  }
+
+  public RestResponse<KsqlEntityList> makeKsqlRequest(String ksql) {
+    KsqlRequest jsonRequest = new KsqlRequest(ksql, localProperties);
+    Response response = makePostRequest("ksql", jsonRequest);
+    KsqlEntityList result = response.readEntity(KsqlEntityList.class);
+    response.close();
+    return RestResponse.successful(result);
+  }
+
+  public RestResponse<CommandStatuses> makeStatusRequest() {
+    Response response = makeGetRequest("status");
+    CommandStatuses result = response.readEntity(CommandStatuses.class);
+    response.close();
+    return RestResponse.successful(result);
+  }
+
+  public RestResponse<CommandStatus> makeStatusRequest(String commandId) {
+    RestResponse<CommandStatus> result;
+    Response response = makeGetRequest(String.format("status/%s", commandId));
+    if (response.getStatus() == Response.Status.OK.getStatusCode()) {
+      result = RestResponse.successful(response.readEntity(CommandStatus.class));
+    } else {
+      result = RestResponse.erroneous(response.readEntity(ErrorMessage.class));
+    }
+    response.close();
+    return result;
+  }
+
+  public RestResponse<QueryStream> makeQueryRequest(String ksql) {
+    KsqlRequest jsonRequest = new KsqlRequest(ksql, localProperties);
+    Response response = makePostRequest("query", jsonRequest);
+    if (response.getStatus() == Response.Status.OK.getStatusCode()) {
+      return RestResponse.successful(new QueryStream(response));
+    } else {
+      return RestResponse.erroneous(response.readEntity(ErrorMessage.class));
+    }
+  }
+
+  public RestResponse<InputStream> makePrintTopicRequest(
+      String ksql
+  ) {
+    RestResponse<InputStream> result;
+    KsqlRequest jsonRequest = new KsqlRequest(ksql, localProperties);
+    Response response = makePostRequest("query", jsonRequest);
+    if (response.getStatus() == Response.Status.OK.getStatusCode()) {
+      result = RestResponse.successful((InputStream) response.getEntity());
+    } else {
+      result = RestResponse.erroneous(response.readEntity(ErrorMessage.class));
+    }
+    return result;
+  }
+
+  @Override
+  public void close() {
+    client.close();
+  }
+
+  private Response makePostRequest(String path, Object jsonEntity) {
+    return client.target(serverAddress)
+        .path(path)
+        .request(MediaType.APPLICATION_JSON_TYPE)
+        .post(Entity.json(jsonEntity));
+  }
+
+  private Response makeGetRequest(String path) {
+    return client.target(serverAddress).path(path)
+        .request(MediaType.APPLICATION_JSON_TYPE)
+        .get();
+  }
+
+  public static class QueryStream implements Closeable, AutoCloseable, Iterator<StreamedRow> {
+
+    private final Response response;
+    private final ObjectMapper objectMapper;
+    private final Scanner responseScanner;
+
+    private StreamedRow bufferedRow;
+    private boolean closed;
+
+    public QueryStream(Response response) {
+      this.response = response;
+
+      this.objectMapper = new ObjectMapper();
+      this.responseScanner = new Scanner((InputStream) response.getEntity());
+
+      this.bufferedRow = null;
+      this.closed = false;
+    }
+
+    @Override
+    public boolean hasNext() {
+      if (closed) {
+        throw new IllegalStateException("Cannot call hasNext() once closed");
+      }
+
+      if (bufferedRow != null) {
+        return true;
+      }
+
+      while (responseScanner.hasNextLine()) {
+        String responseLine = responseScanner.nextLine().trim();
+        if (!responseLine.isEmpty()) {
+          try {
+            bufferedRow = objectMapper.readValue(responseLine, StreamedRow.class);
+          } catch (IOException exception) {
+            // TODO: Should the exception be handled some other way?
+            // Rethrowing it unchecked at least keeps it visible...
+            throw new RuntimeException(exception);
+          }
+          return true;
+        }
+      }
+
+      return false;
+    }
+
+    @Override
+    public StreamedRow next() {
+      if (closed) {
+        throw new IllegalStateException("Cannot call next() once closed");
+      }
+
+      if (!hasNext()) {
+        throw new NoSuchElementException();
+      }
+
+      StreamedRow result = bufferedRow;
+      bufferedRow = null;
+      return result;
+    }
+
+    @Override
+    public void close() {
+      if (closed) {
+        throw new IllegalStateException("Cannot call close() when already closed");
+      }
+
+      closed = true;
+      responseScanner.close();
+      response.close();
+    }
+  }
+
+  public Map<String, Object> getLocalProperties() {
+    return localProperties;
+  }
+
+  public Object setProperty(String property, Object value) {
+    Object oldValue = localProperties.get(property);
+    localProperties.put(property, value);
+    return oldValue;
+  }
+
+  public boolean unsetProperty(String property) {
+    if (localProperties.containsKey(property)) {
+      localProperties.remove(property);
+      return true;
+    }
+    return false;
+  }
+}
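To make the client's surface concrete, here is a hedged usage sketch (the class name, server address, and statements are illustrative; it assumes a KSQL REST server already listening on the address configured in ksql-server.properties, and that the referenced streams exist):

    package io.confluent.ksql.rest.client;

    import io.confluent.ksql.rest.entity.KsqlEntityList;
    import io.confluent.ksql.rest.entity.StreamedRow;

    public class KsqlRestClientSketch {
      public static void main(String[] args) {
        try (KsqlRestClient client = new KsqlRestClient("http://localhost:8080")) {
          // DDL/DML statements go through the "ksql" endpoint...
          RestResponse<KsqlEntityList> ddl = client.makeKsqlRequest(
              "CREATE STREAM bigorders AS SELECT * FROM orders WHERE orderunits > 5;");
          if (ddl.isErroneous()) {
            System.err.println(ddl.getErrorMessage().getMessage());
            return;
          }

          // ...while streaming queries come back row by row through QueryStream.
          RestResponse<KsqlRestClient.QueryStream> query =
              client.makeQueryRequest("SELECT * FROM bigorders;");
          if (query.isSuccessful()) {
            try (KsqlRestClient.QueryStream rows = query.getResponse()) {
              // A continuous query blocks here until the server closes the connection.
              while (rows.hasNext()) {
                StreamedRow row = rows.next();
                System.out.println(row);
              }
            }
          }
        }
      }
    }

diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/client/RestResponse.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/client/RestResponse.java
new file mode 100644
index 000000000000..43280bbd9374
--- /dev/null
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/client/RestResponse.java
@@ -0,0 +1,120 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.rest.client;
+
+import io.confluent.ksql.rest.entity.ErrorMessage;
+
+import java.util.Objects;
+
+// Don't tell anybody, but this is basically Haskell's Either datatype...
+// I swear it seemed like the best way to do things
+public abstract class RestResponse<R> {
+
+  private RestResponse() { }
+
+  public abstract boolean isSuccessful();
+
+  public abstract boolean isErroneous();
+
+  public abstract ErrorMessage getErrorMessage();
+
+  public abstract R getResponse();
+
+  public static <R> RestResponse<R> erroneous(ErrorMessage errorMessage) {
+    return new Erroneous<>(errorMessage);
+  }
+
+  public static <R> RestResponse<R> successful(R response) {
+    return new Successful<>(response);
+  }
+
+  public static <R> RestResponse<R> of(ErrorMessage errorMessage) {
+    return erroneous(errorMessage);
+  }
+
+  public static <R> RestResponse<R> of(R response) {
+    return successful(response);
+  }
+
+  public Object get() {
+    if (isSuccessful()) {
+      return getResponse();
+    } else {
+      return getErrorMessage();
+    }
+  }
+
+  private static class Erroneous<R> extends RestResponse<R> {
+    private final ErrorMessage errorMessage;
+
+    public Erroneous(ErrorMessage errorMessage) {
+      this.errorMessage = errorMessage;
+    }
+
+    @Override
+    public boolean isSuccessful() {
+      return false;
+    }
+
+    @Override
+    public boolean isErroneous() {
+      return true;
+    }
+
+    @Override
+    public ErrorMessage getErrorMessage() {
+      return errorMessage;
+    }
+
+    @Override
+    public R getResponse() {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  private static class Successful<R> extends RestResponse<R> {
+    private final R response;
+
+    public Successful(R response) {
+      this.response = response;
+    }
+
+    @Override
+    public boolean isSuccessful() {
+      return true;
+    }
+
+    @Override
+    public boolean isErroneous() {
+      return false;
+    }
+
+    @Override
+    public ErrorMessage getErrorMessage() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public R getResponse() {
+      return response;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof Successful)) {
+        return false;
+      }
+      Successful<?> that = (Successful<?>) o;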
return Objects.equals(getResponse(), that.getResponse()); + } + + @Override + public int hashCode() { + return Objects.hash(getResponse()); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatus.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatus.java new file mode 100644 index 000000000000..4e5a685399b8 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatus.java @@ -0,0 +1,58 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.annotation.JsonTypeName; + +import java.util.Objects; + +@JsonTypeName("commandStatus") +@JsonTypeInfo( + include = JsonTypeInfo.As.WRAPPER_OBJECT, + use = JsonTypeInfo.Id.NAME +) +public class CommandStatus { + public enum Status { QUEUED, PARSING, EXECUTING, RUNNING, TERMINATED, SUCCESS, ERROR } + + private final Status status; + private final String message; + + @JsonCreator + public CommandStatus( + @JsonProperty("status") Status status, + @JsonProperty("message") String message) { + this.status = status; + this.message = message; + } + + public Status getStatus() { + return status; + } + + public String getMessage() { + return message; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CommandStatus)) { + return false; + } + CommandStatus that = (CommandStatus) o; + return getStatus() == that.getStatus() + && Objects.equals(getMessage(), that.getMessage()); + } + + @Override + public int hashCode() { + return Objects.hash(getStatus(), getMessage()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatusEntity.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatusEntity.java new file mode 100644 index 000000000000..4317fb8aed6f --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatusEntity.java @@ -0,0 +1,99 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.annotation.JsonUnwrapped; +import io.confluent.ksql.rest.server.computation.CommandId; + +import java.util.Map; +import java.util.Objects; + +@JsonTypeName("currentStatus") +public class CommandStatusEntity extends KsqlEntity { + private final CommandId commandId; + private final CommandStatus commandStatus; + + public CommandStatusEntity( + String statementText, + CommandId commandId, + CommandStatus commandStatus + ) { + super(statementText); + this.commandId = commandId; + this.commandStatus = commandStatus; + } + + // Commented-out now due to Jackson issues with deserializing unwrapped values; may be possible + // to do things this way once Jackson 2.9.0 comes out but until then have to stick with the + // Map constructor hack + // @JsonCreator + // public CommandStatusEntity( + // @JsonProperty("statementText") String statementText, + // @JsonProperty("commandId") String commandId, + // @JsonProperty("status") String status, + // @JsonProperty("message") String message + // ) { + // this( + // statementText, + // CommandId.fromString(commandId), + // new CommandStatus(CommandStatus.Status.valueOf(status), message) + // ); + // } + + public CommandStatusEntity( + String statementText, + String commandId, + String status, + String message + ) { + this( + statementText, + CommandId.fromString(commandId), + new CommandStatus(CommandStatus.Status.valueOf(status), message) + ); + } + + @JsonCreator + public CommandStatusEntity(Map properties) { + this( + (String) properties.get("statementText"), + (String) properties.get("commandId"), + (String) properties.get("status"), + (String) properties.get("message") + ); + } + + @JsonUnwrapped + public CommandId getCommandId() { + return commandId; + } + + @JsonTypeInfo(use = JsonTypeInfo.Id.NONE) + @JsonUnwrapped + public CommandStatus getCommandStatus() { + return commandStatus; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CommandStatusEntity)) { + return false; + } + CommandStatusEntity that = (CommandStatusEntity) o; + return Objects.equals(getCommandId(), that.getCommandId()) + && Objects.equals(getCommandStatus(), that.getCommandStatus()); + } + + @Override + public int hashCode() { + return Objects.hash(getCommandId(), getCommandStatus()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatuses.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatuses.java new file mode 100644 index 000000000000..89df76cd1e16 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/CommandStatuses.java @@ -0,0 +1,34 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.annotation.JsonTypeName; +import io.confluent.ksql.rest.server.computation.CommandId; + +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; + +@JsonTypeName("commandStatuses") +@JsonTypeInfo( + include = JsonTypeInfo.As.WRAPPER_OBJECT, + use = JsonTypeInfo.Id.NAME +) +public class CommandStatuses extends HashMap { + + @JsonCreator + public CommandStatuses(Map statuses) { + super(statuses); + } + + public static CommandStatuses fromFullStatuses(Map fullStatuses) { + Map statuses = fullStatuses.entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getStatus()) + ); + return new CommandStatuses(statuses); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ErrorMessage.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ErrorMessage.java new file mode 100644 index 000000000000..22e2d32fad29 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ErrorMessage.java @@ -0,0 +1,78 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +// TODO: Add a field for status code +public class ErrorMessage { + + private final String message; + private final List stackTrace; + + @JsonCreator + public ErrorMessage( + @JsonProperty("message") String message, + @JsonProperty("stackTrace") List stackTrace + ) { + this.message = message; + this.stackTrace = stackTrace; + } + + public ErrorMessage(Throwable exception) { + this(exception.getMessage(), getStackTraceStrings(exception)); + } + + public static List getStackTraceStrings(Throwable exception) { + return Arrays.stream(exception.getStackTrace()) + .map(StackTraceElement::toString) + .collect(Collectors.toList()); + } + + public String getMessage() { + return message; + } + + public List getStackTrace() { + return new ArrayList<>(stackTrace); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(message); + sb.append("\n"); + for (String line : stackTrace) { + sb.append(line); + sb.append("\n"); + } + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ErrorMessage)) { + return false; + } + ErrorMessage that = (ErrorMessage) o; + return Objects.equals(getMessage(), that.getMessage()) + && Objects.equals(getStackTrace(), that.getStackTrace()); + } + + @Override + public int hashCode() { + return Objects.hash(getMessage(), getStackTrace()); + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ErrorMessageEntity.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ErrorMessageEntity.java new file mode 100644 index 000000000000..d5cda70f233f --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ErrorMessageEntity.java @@ -0,0 +1,53 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.annotation.JsonUnwrapped; + +import java.util.Objects; + +@JsonTypeName("error") +public class ErrorMessageEntity extends KsqlEntity { + + private final ErrorMessage errorMessage; + + @JsonCreator + public ErrorMessageEntity( + @JsonProperty("statementText") String statementText, + @JsonProperty("errorMessage") ErrorMessage errorMessage + ) { + super(statementText); + this.errorMessage = errorMessage; + } + + public ErrorMessageEntity(String statementText, Throwable exception) { + this(statementText, new ErrorMessage(exception)); + } + + @JsonUnwrapped + public ErrorMessage getErrorMessage() { + return errorMessage; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ErrorMessageEntity)) { + return false; + } + ErrorMessageEntity that = (ErrorMessageEntity) o; + return Objects.equals(getErrorMessage(), that.getErrorMessage()); + } + + @Override + public int hashCode() { + return Objects.hash(getErrorMessage()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ExecutionPlan.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ExecutionPlan.java new file mode 100644 index 000000000000..4a2556bd30f3 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ExecutionPlan.java @@ -0,0 +1,44 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; + +import java.util.Objects; + +@JsonTypeName("executionPlan") +public class ExecutionPlan extends KsqlEntity { + + private final String executionPlan; + + @JsonCreator + public ExecutionPlan(@JsonProperty("executionPlanText") String executionPlan) { + super(executionPlan); + this.executionPlan = executionPlan; + } + + public String getExecutionPlan() { + return executionPlan; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ExecutionPlan)) { + return false; + } + ExecutionPlan executionPlan = (ExecutionPlan) o; + return Objects.equals(getExecutionPlan(), executionPlan.getExecutionPlan()); + } + + @Override + public int hashCode() { + return Objects.hash(getExecutionPlan()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KafkaTopicInfo.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KafkaTopicInfo.java new file mode 100644 index 000000000000..a44ad8f8aeb3 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KafkaTopicInfo.java @@ -0,0 +1,63 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +public class KafkaTopicInfo { + + private final String name; + private final String registered; + private final String partitionCount; + private final String replicaInfo; + + @JsonCreator + public KafkaTopicInfo( + @JsonProperty("name") String name, + @JsonProperty("registered") String registered, + @JsonProperty("partitionCount") String partitionCount, + @JsonProperty("replicaInfo") String replicaInfo + ) { + this.name = name; + this.registered = registered; + this.partitionCount = partitionCount; + this.replicaInfo = replicaInfo; + } + + public String getName() { + return name; + } + + public String getRegistered() { + return registered; + } + + public String getPartitionCount() { + return partitionCount; + } + + public String getReplicaInfo() { + return replicaInfo; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + KafkaTopicInfo that = (KafkaTopicInfo) o; + return Objects.equals(name, that.name) && + Objects.equals(partitionCount, that.partitionCount) && + Objects.equals(replicaInfo, that.replicaInfo) && + Objects.equals(registered, that.registered); + } + + @Override + public int hashCode() { + return Objects.hash(name, partitionCount, replicaInfo, registered); + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KafkaTopicsList.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KafkaTopicsList.java new file mode 100644 index 000000000000..fc8e2500bd71 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KafkaTopicsList.java @@ -0,0 +1,134 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.annotation.JsonUnwrapped; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.StringUtil; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.common.TopicPartitionInfo; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeMap; + +@JsonTypeName("kafka_topics") +public class KafkaTopicsList extends KsqlEntity { + private final Collection topics; + + @JsonCreator + public KafkaTopicsList( + @JsonProperty("statementText") String statementText, + @JsonProperty("kafka_topics") Collection topics + ) { + super(statementText); + this.topics = topics; + } + + @JsonUnwrapped + public List getTopics() { + return new ArrayList<>(topics); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof KafkaTopicsList)) { + return false; + } + KafkaTopicsList that = (KafkaTopicsList) o; + return Objects.equals(getTopics(), that.getTopics()); + } + + @Override + public int hashCode() { + return Objects.hash(getTopics()); + } + + public static KafkaTopicsList build(String statementText, + Collection ksqlTopics, + Map kafkaTopicDescriptions, + KsqlConfig ksqlConfig) { + Set registeredNames = getRegisteredKafkaTopicNames(ksqlTopics); + + List kafkaTopicInfoList = new ArrayList<>(); + kafkaTopicDescriptions = new TreeMap<>(filterKsqlInternalTopics(kafkaTopicDescriptions, + ksqlConfig)); + for (TopicDescription desp: kafkaTopicDescriptions.values()) { + kafkaTopicInfoList.add(new KafkaTopicInfo( + desp.name(), + String.valueOf(registeredNames.contains(desp.name())), + String.valueOf(desp.partitions().size()), + String.valueOf(getTopicReplicaInfo(desp.partitions())) + )); + } + return new KafkaTopicsList(statementText, kafkaTopicInfoList); + } + + private static Set getRegisteredKafkaTopicNames(Collection ksqlTopics) { + Set registeredNames = new HashSet<>(); + for (KsqlTopic ksqlTopic: ksqlTopics) { + registeredNames.add(ksqlTopic.getKafkaTopicName()); + } + return registeredNames; + } + + private static String getTopicReplicaInfo(List partitions) { + int[] replicaSizes = new int[partitions.size()]; + + for (int i = 0; i < partitions.size(); i++) { + replicaSizes[i] = partitions.get(i).replicas().size(); + } + + boolean sameReplicaCount = true; + for (int i = 1; i < partitions.size(); i++) { + if (replicaSizes[i] != replicaSizes[i-1]) { + sameReplicaCount = false; + break; + } + } + + if (sameReplicaCount) { + return partitions.size() == 0 ? 
"0" : String.valueOf(replicaSizes[0]); + } else { + return StringUtil.join(", ", Arrays.asList(replicaSizes)); + } + } + + private static Map filterKsqlInternalTopics( + Map kafkaTopicDescriptions, KsqlConfig ksqlConfig) { + Map filteredKafkaTopics = new HashMap<>(); + String serviceId = ksqlConfig.get(KsqlConfig.KSQL_SERVICE_ID_CONFIG) + .toString(); + String persistentQueryPrefix = ksqlConfig.get(KsqlConfig + .KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG) + .toString(); + String transientQueryPrefix = ksqlConfig.get(KsqlConfig + .KSQL_TRANSIENT_QUERY_NAME_PREFIX_CONFIG) + .toString(); + + for (String kafkaTopicName: kafkaTopicDescriptions.keySet()) { + if (!kafkaTopicName.startsWith(serviceId + persistentQueryPrefix) && + !kafkaTopicName.startsWith(serviceId + transientQueryPrefix)) { + filteredKafkaTopics.put(kafkaTopicName.toLowerCase(), kafkaTopicDescriptions.get(kafkaTopicName)); + } + } + return filteredKafkaTopics; + } + +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlEntity.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlEntity.java new file mode 100644 index 000000000000..03e5b5db8906 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlEntity.java @@ -0,0 +1,37 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; + +@JsonTypeInfo( + use = JsonTypeInfo.Id.NAME, + include = JsonTypeInfo.As.WRAPPER_OBJECT +) +@JsonSubTypes({ + @JsonSubTypes.Type(value = CommandStatusEntity.class, name = "currentStatus"), + @JsonSubTypes.Type(value = ErrorMessageEntity.class, name = "error"), + @JsonSubTypes.Type(value = PropertiesList.class, name = "properties"), + @JsonSubTypes.Type(value = Queries.class, name = "queries"), + @JsonSubTypes.Type(value = SourceDescription.class, name = "description"), + @JsonSubTypes.Type(value = TopicDescription.class, name = "topic_description"), + @JsonSubTypes.Type(value = StreamsList.class, name = "streams"), + @JsonSubTypes.Type(value = TablesList.class, name = "tables"), + @JsonSubTypes.Type(value = KsqlTopicsList.class, name = "ksql_topics"), + @JsonSubTypes.Type(value = KafkaTopicsList.class, name = "kafka_topics"), + @JsonSubTypes.Type(value = ExecutionPlan.class, name = "executionPlan") +}) +public abstract class KsqlEntity { + private final String statementText; + + public KsqlEntity(String statementText) { + this.statementText = statementText; + } + + public String getStatementText() { + return statementText; + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlEntityList.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlEntityList.java new file mode 100644 index 000000000000..1d922a2694ed --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlEntityList.java @@ -0,0 +1,25 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import java.util.ArrayList; +import java.util.Collection; + +/** + * Utility class to prevent type erasure from stripping annotation information from KsqlEntity + * instances in a list + */ +public class KsqlEntityList extends ArrayList { + public KsqlEntityList() { + } + + public KsqlEntityList(int initialCapacity) { + super(initialCapacity); + } + + public KsqlEntityList(Collection c) { + super(c); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlRequest.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlRequest.java new file mode 100644 index 000000000000..5b045e7e4cfa --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlRequest.java @@ -0,0 +1,53 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +public class KsqlRequest { + private final String ksql; + private final Map streamsProperties; + + @JsonCreator + public KsqlRequest( + @JsonProperty("ksql") String ksql, + @JsonProperty("streamsProperties") Map streamsProperties + ) { + this.ksql = ksql; + this.streamsProperties = Optional.ofNullable(streamsProperties).orElse(Collections.emptyMap()); + } + + public String getKsql() { + return ksql; + } + + public Map getStreamsProperties() { + return streamsProperties; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof KsqlRequest)) { + return false; + } + KsqlRequest that = (KsqlRequest) o; + return Objects.equals(getKsql(), that.getKsql()) + && Objects.equals(getStreamsProperties(), that.getStreamsProperties()); + } + + @Override + public int hashCode() { + return Objects.hash(getKsql(), getStreamsProperties()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlTopicInfo.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlTopicInfo.java new file mode 100644 index 000000000000..401a66ea7c1c --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlTopicInfo.java @@ -0,0 +1,68 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import io.confluent.ksql.metastore.DataSource; +import io.confluent.ksql.metastore.KsqlTopic; + +import java.util.Objects; + +public class KsqlTopicInfo { + private final String name; + private final String kafkaTopic; + private final DataSource.DataSourceSerDe format; + + @JsonCreator + public KsqlTopicInfo( + @JsonProperty("name") String name, + @JsonProperty("kafkaTopic") String kafkaTopic, + @JsonProperty("format") DataSource.DataSourceSerDe format + ) { + this.name = name; + this.kafkaTopic = kafkaTopic; + this.format = format; + } + + public KsqlTopicInfo(KsqlTopic ksqlTopic) { + this( + ksqlTopic.getTopicName(), + ksqlTopic.getKafkaTopicName(), + ksqlTopic.getKsqlTopicSerDe().getSerDe() + ); + } + + public String getName() { + return name; + } + + public String getKafkaTopic() { + return kafkaTopic; + } + + public DataSource.DataSourceSerDe getFormat() { + return format; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof KsqlTopicInfo)) { + return false; + } + KsqlTopicInfo topicInfo = (KsqlTopicInfo) o; + return Objects.equals(getName(), topicInfo.getName()) + && Objects.equals(getKafkaTopic(), topicInfo.getKafkaTopic()) + && getFormat() == topicInfo.getFormat(); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getKafkaTopic(), getFormat()); + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlTopicsList.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlTopicsList.java new file mode 100644 index 000000000000..6c659fac6a6a --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/KsqlTopicsList.java @@ -0,0 +1,61 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.annotation.JsonUnwrapped; +import io.confluent.ksql.metastore.KsqlTopic; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; + +@JsonTypeName("ksql_topics") +public class KsqlTopicsList extends KsqlEntity { + private final Collection topics; + + @JsonCreator + public KsqlTopicsList( + @JsonProperty("statementText") String statementText, + @JsonProperty("ksql_topics") Collection topics + ) { + super(statementText); + this.topics = topics; + } + + @JsonUnwrapped + public List getTopics() { + return new ArrayList<>(topics); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof KsqlTopicsList)) { + return false; + } + KsqlTopicsList that = (KsqlTopicsList) o; + return Objects.equals(getTopics(), that.getTopics()); + } + + @Override + public int hashCode() { + return Objects.hash(getTopics()); + } + + public static KsqlTopicsList build(String statementText, Collection ksqlTopics) { + List ksqlTopicInfoList = new ArrayList<>(); + for (KsqlTopic ksqlTopic: ksqlTopics) { + ksqlTopicInfoList.add(new KsqlTopicInfo(ksqlTopic)); + } + return new KsqlTopicsList(statementText, ksqlTopicInfoList); + } + +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java new file mode 100644 index 000000000000..6460dbf0be18 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java @@ -0,0 +1,49 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.annotation.JsonUnwrapped; + +import java.util.Map; +import java.util.Objects; + +@JsonTypeName("properties") +public class PropertiesList extends KsqlEntity { + private final Map properties; + + @JsonCreator + public PropertiesList( + @JsonProperty("statementText") String statementText, + @JsonProperty("properties") Map properties + ) { + super(statementText); + this.properties = properties; + } + + @JsonUnwrapped + public Map getProperties() { + return properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof PropertiesList)) { + return false; + } + PropertiesList that = (PropertiesList) o; + return Objects.equals(getProperties(), that.getProperties()); + } + + @Override + public int hashCode() { + return Objects.hash(getProperties()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/Queries.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/Queries.java new file mode 100644 index 000000000000..a22ec3161a0d --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/Queries.java @@ -0,0 +1,96 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +@JsonTypeName("queries") +public class Queries extends KsqlEntity { + private final List queries; + + @JsonCreator + public Queries( + @JsonProperty("statementText") String statementText, + @JsonProperty("queries") List queries + ) { + super(statementText); + this.queries = queries; + } + + public List getQueries() { + return new ArrayList<>(queries); + } + + public static class RunningQuery { + private final String queryString; + private final String kafkaTopic; + private final long id; + + @JsonCreator + public RunningQuery( + @JsonProperty("queryString") String queryString, + @JsonProperty("kafkaTopic") String kafkaTopic, + @JsonProperty("id") long id + ) { + this.queryString = queryString; + this.kafkaTopic = kafkaTopic; + this.id = id; + } + + public String getQueryString() { + return queryString; + } + + public String getKafkaTopic() { + return kafkaTopic; + } + + public long getId() { + return id; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof RunningQuery)) { + return false; + } + RunningQuery that = (RunningQuery) o; + return getId() == that.getId() + && Objects.equals(getQueryString(), that.getQueryString()) + && Objects.equals(getKafkaTopic(), that.getKafkaTopic()); + } + + @Override + public int hashCode() { + return Objects.hash(getQueryString(), getKafkaTopic(), getId()); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Queries)) { + return false; + } + Queries that = (Queries) o; + return Objects.equals(getQueries(), that.getQueries()); + } + + @Override + public int hashCode() { + return Objects.hash(getQueries()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SchemaMapper.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SchemaMapper.java new file mode 100644 index 000000000000..90bdd73f200d --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SchemaMapper.java @@ -0,0 +1,95 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.module.SimpleModule; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.json.JsonConverter; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +public class SchemaMapper { + private final Map configs; + + public SchemaMapper() { + this(Collections.emptyMap()); + } + + public SchemaMapper(Map configs) { + this.configs = configs; + } + + public ObjectMapper registerToObjectMapper(ObjectMapper objectMapper) { + return objectMapper + .registerModule(new SimpleModule() + .addSerializer(Schema.class, new SchemaJsonSerializer(createNewConverter())) + .addDeserializer(Schema.class, new SchemaJsonDeserializer(createNewConverter()))) + .addMixIn(Field.class, FieldMixin.class); + } + + private JsonConverter createNewConverter() { + JsonConverter result = new JsonConverter(); + result.configure(configs, false); + return result; + } + + private static class SchemaJsonSerializer extends JsonSerializer { + private final JsonConverter jsonConverter; + + public SchemaJsonSerializer(JsonConverter jsonConverter) { + this.jsonConverter = jsonConverter; + } + + @Override + public void serialize( + Schema schema, + JsonGenerator jsonGenerator, + SerializerProvider serializerProvider + ) throws IOException { + jsonGenerator.writeTree(jsonConverter.asJsonSchema(schema)); + } + } + + private static class SchemaJsonDeserializer extends JsonDeserializer { + private final JsonConverter jsonConverter; + + public SchemaJsonDeserializer(JsonConverter jsonConverter) { + this.jsonConverter = jsonConverter; + } + + @Override + public Schema deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) + throws IOException { + return jsonConverter.asConnectSchema(jsonParser.readValueAsTree()); + } + } + + private static class FieldMixin { + @JsonProperty("name") private String name; + @JsonProperty("index") private int index; + @JsonProperty("schema") private Schema schema; + + @JsonCreator + public FieldMixin( + @JsonProperty("name") String name, + @JsonProperty("index") int index, + @JsonProperty("schema") Schema schema + ) { + + } + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ServerInfo.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ServerInfo.java new file mode 100644 index 000000000000..55408a8cf9e4 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/ServerInfo.java @@ -0,0 +1,44 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.annotation.JsonTypeName; + +import java.util.Objects; + +@JsonTypeInfo(include = JsonTypeInfo.As.WRAPPER_OBJECT, use = JsonTypeInfo.Id.NAME) +@JsonTypeName("KSQL Server Info") +public class ServerInfo { + private final String version; + + @JsonCreator + public ServerInfo(@JsonProperty("version") String version) { + this.version = version; + } + + public String getVersion() { + return version; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ServerInfo)) { + return false; + } + ServerInfo serverInfo1 = (ServerInfo) o; + return Objects.equals(getVersion(), serverInfo1.getVersion()); + } + + @Override + public int hashCode() { + return Objects.hash(getVersion()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java new file mode 100644 index 000000000000..0ec19166b87c --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java @@ -0,0 +1,149 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.annotation.JsonValue; +import com.fasterxml.jackson.annotation.JsonView; + +import io.confluent.ksql.metastore.DataSource; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.util.Pair; +import io.confluent.ksql.util.SchemaUtil; + +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; + +@JsonTypeName("description") +public class SourceDescription extends KsqlEntity { + + private final String name; + private final List schema; + private final DataSource.DataSourceType type; + private final String key; + private final String timestamp; + + @JsonCreator + public SourceDescription( + @JsonProperty("statementText") String statementText, + @JsonProperty("name") String name, + @JsonProperty("schema") List schema, + @JsonProperty("type") DataSource.DataSourceType type, + @JsonProperty("key") String key, + @JsonProperty("timestamp") String timestamp + ) { + super(statementText); + this.name = name; + this.schema = schema; + this.type = type; + this.key = key; + this.timestamp = timestamp; + } + + public SourceDescription(String statementText, StructuredDataSource dataSource) { + + this( + statementText, + dataSource.getName(), + dataSource.getSchema().fields().stream().map( + field -> { + return new FieldSchemaInfo(field.name(), SchemaUtil + .getSchemaFieldName(field)); + }).collect(Collectors.toList()), + dataSource.getDataSourceType(), + Optional.ofNullable(dataSource.getKeyField()).map(Field::name).orElse(null), + Optional.ofNullable(dataSource.getTimestampField()).map(Field::name).orElse(null) + ); + } + + public String getName() { + return name; + } + + public List getSchema() { + return schema; + } + + public DataSource.DataSourceType getType() { + return type; + } + + public String getKey() { + 
return key; + } + + public String getTimestamp() { + return timestamp; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SourceDescription)) { + return false; + } + SourceDescription that = (SourceDescription) o; + return Objects.equals(getName(), that.getName()) + && Objects.equals(getSchema(), that.getSchema()) + && getType() == that.getType() + && Objects.equals(getKey(), that.getKey()) + && Objects.equals(getTimestamp(), that.getTimestamp()); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getSchema(), getType(), getKey(), getTimestamp()); + } + + public static class FieldSchemaInfo { + private final String name; + private final String type; + + @JsonCreator + public FieldSchemaInfo( + @JsonProperty("name") String name, + @JsonProperty("type") String type + ) { + this.name = name; + this.type = type; + } + + public String getName() { + return name; + } + + public String getType() { + return type; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof FieldSchemaInfo)) { + return false; + } + FieldSchemaInfo that = (FieldSchemaInfo) o; + return Objects.equals(getName(), that.getName()) + && Objects.equals(getType(), that.getType()); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getType()); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/StreamedRow.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/StreamedRow.java new file mode 100644 index 000000000000..cd523bf7b3b1 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/StreamedRow.java @@ -0,0 +1,62 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import io.confluent.ksql.physical.GenericRow; + +import java.util.Objects; + +public class StreamedRow { + private final GenericRow row; + private final ErrorMessage errorMessage; + + @JsonCreator + public StreamedRow( + @JsonProperty("row") GenericRow row, + @JsonProperty("errorMessage") ErrorMessage errorMessage + ) { + if ((row == null) == (errorMessage == null)) { + throw new IllegalArgumentException("Exactly one of row and error message must be null"); + } + this.row = row; + this.errorMessage = errorMessage; + } + + public StreamedRow(GenericRow row) { + this(row, null); + } + + public StreamedRow(Throwable exception) { + this(null, new ErrorMessage(exception)); + } + + public GenericRow getRow() { + return row; + } + + public ErrorMessage getErrorMessage() { + return errorMessage; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof StreamedRow)) { + return false; + } + StreamedRow that = (StreamedRow) o; + return Objects.equals(getRow(), that.getRow()) + && Objects.equals(getErrorMessage(), that.getErrorMessage()); + } + + @Override + public int hashCode() { + return Objects.hash(getRow(), getErrorMessage()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/StreamsList.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/StreamsList.java new file mode 100644 index 000000000000..fa41ce88c710 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/StreamsList.java @@ -0,0 +1,118 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.annotation.JsonUnwrapped; +import io.confluent.ksql.metastore.KsqlStream; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +@JsonTypeName("streams") +public class StreamsList extends KsqlEntity { + private final Collection streams; + + @JsonCreator + public StreamsList( + @JsonProperty("statementText") String statementText, + @JsonProperty("streams") Collection streams + ) { + super(statementText); + this.streams = streams; + } + + public static StreamsList fromKsqlStreams( + String statementText, + Collection ksqlStreams + ) { + Collection streamInfos = + ksqlStreams.stream().map(StreamInfo::new).collect(Collectors.toList()); + return new StreamsList(statementText, streamInfos); + } + + @JsonUnwrapped + public List getStreams() { + return new ArrayList<>(streams); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof StreamsList)) { + return false; + } + StreamsList that = (StreamsList) o; + return Objects.equals(getStreams(), that.getStreams()); + } + + @Override + public int hashCode() { + return Objects.hash(getStreams()); + } + + public static class StreamInfo { + private final String name; + private final String topic; + private final String format; + + @JsonCreator + public StreamInfo( + @JsonProperty("name") String name, + @JsonProperty("topic") String topic, + @JsonProperty("format") String format + ) { + this.name = name; + this.topic = topic; + this.format = format; + } + + public StreamInfo(KsqlStream ksqlStream) { + this( + ksqlStream.getName(), + ksqlStream.getKsqlTopic().getKafkaTopicName(), + ksqlStream.getKsqlTopic().getKsqlTopicSerDe().getSerDe().name() + ); + } + + public String getName() { + return name; + } + + public String getTopic() { + return topic; + } + + public String getFormat() { + return format; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof StreamInfo)) { + return false; + } + StreamInfo that = (StreamInfo) o; + return Objects.equals(getName(), that.getName()) + && Objects.equals(getTopic(), that.getTopic()) + && Objects.equals(getFormat(), that.getFormat()); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getTopic(), getFormat()); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/TablesList.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/TablesList.java new file mode 100644 index 000000000000..f5f122c32d20 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/TablesList.java @@ -0,0 +1,124 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.annotation.JsonUnwrapped; +import io.confluent.ksql.metastore.KsqlTable; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +@JsonTypeName("tables") +public class TablesList extends KsqlEntity { + private final Collection tables; + + @JsonCreator + public TablesList( + @JsonProperty("statementText") String statementText, + @JsonProperty("tables") Collection tables + ) { + super(statementText); + this.tables = tables; + } + + public static TablesList fromKsqlTables(String statementText, Collection ksqlTables) { + Collection tableInfos = + ksqlTables.stream().map(TableInfo::new).collect(Collectors.toList()); + return new TablesList(statementText, tableInfos); + } + + @JsonUnwrapped + public List getTables() { + return new ArrayList<>(tables); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TablesList)) { + return false; + } + TablesList that = (TablesList) o; + return Objects.equals(getTables(), that.getTables()); + } + + @Override + public int hashCode() { + return Objects.hash(getTables()); + } + + public static class TableInfo { + private final String name; + private final String topic; + private final String format; + private final boolean isWindowed; + + @JsonCreator + public TableInfo( + @JsonProperty("name") String name, + @JsonProperty("topic") String topic, + @JsonProperty("format") String format, + @JsonProperty("isWindowed") boolean isWindowed + ) { + this.name = name; + this.topic = topic; + this.format = format; + this.isWindowed = isWindowed; + } + + public TableInfo(KsqlTable ksqlTable) { + this( + ksqlTable.getName(), + ksqlTable.getKsqlTopic().getKafkaTopicName(), + ksqlTable.getKsqlTopic().getKsqlTopicSerDe().getSerDe().name(), + ksqlTable.isWindowed() + ); + } + + public String getName() { + return name; + } + + public String getTopic() { + return topic; + } + + public String getFormat() { + return format; + } + + public boolean getIsWindowed() { + return isWindowed; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TableInfo)) { + return false; + } + TableInfo tableInfo = (TableInfo) o; + return getIsWindowed() == tableInfo.getIsWindowed() + && Objects.equals(getName(), tableInfo.getName()) + && Objects.equals(getTopic(), tableInfo.getTopic()) + && Objects.equals(getFormat(), tableInfo.getFormat()); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getTopic(), getFormat(), isWindowed); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/TopicDescription.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/TopicDescription.java new file mode 100644 index 000000000000..1d94cda02330 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/TopicDescription.java @@ -0,0 +1,73 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.entity; + + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; + +import java.util.Objects; + +import io.confluent.ksql.metastore.StructuredDataSource; + +@JsonTypeName("topic_description") +public class TopicDescription extends KsqlEntity { + private final String name; + private final String kafkaTopic; + private final String format; + private final String schemaString; + + + @JsonCreator + public TopicDescription( + @JsonProperty("statementText") String statementText, + @JsonProperty("name") String name, + @JsonProperty("kafkaTopic") String kafkaTopic, + @JsonProperty("format") String format, + @JsonProperty("schemaString") String schemaString + ) { + super(statementText); + this.name = name; + this.kafkaTopic = kafkaTopic; + this.format = format; + this.schemaString = schemaString; + } + + public String getName() { + return name; + } + + public String getKafkaTopic() { + return kafkaTopic; + } + + public String getFormat() { + return format; + } + + public String getSchemaString() { + return schemaString; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TopicDescription)) { + return false; + } + TopicDescription that = (TopicDescription) o; + return Objects.equals(getName(), that.getName()) + && Objects.equals(getKafkaTopic(), that.getKafkaTopic()) + && Objects.equals(getFormat(), that.getFormat()); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getKafkaTopic(), getFormat()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/CliOptions.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/CliOptions.java new file mode 100644 index 000000000000..3173d6658691 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/CliOptions.java @@ -0,0 +1,81 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server; + +import com.github.rvesse.airline.HelpOption; +import com.github.rvesse.airline.SingleCommand; +import com.github.rvesse.airline.annotations.Arguments; +import com.github.rvesse.airline.annotations.Command; +import com.github.rvesse.airline.annotations.Option; +import com.github.rvesse.airline.annotations.restrictions.Once; +import com.github.rvesse.airline.annotations.restrictions.Required; +import com.github.rvesse.airline.help.Help; +import com.github.rvesse.airline.parser.errors.ParseException; + +import javax.inject.Inject; +import java.io.IOException; + +// TODO: Enable specification of properties here so that useful defaults for bootstrap server, port, +// etc. 
can be established in the ksql-server-start script + +@Command(name = "KSQL-Server", description = "KSQL Cluster") +public class CliOptions { + + private static final String QUICKSTART_OPTION_NAME = "--quickstart"; + + // Only here so that the help message generated by Help.help() is accurate + @Inject + public HelpOption help; + + @Once + @Required + @Arguments( + title = "properties-file", + description = "A file specifying properties for the KSQL Server, KSQL, " + + "and its underlying Kafka Streams instance(s)" + ) + private String propertiesFile; + + public String getPropertiesFile() { + return propertiesFile; + } + + @Option( + name = QUICKSTART_OPTION_NAME, + description = "Whether to serve a quickstart file (located in the resources directory) " + + "at /quickstart.html" + ) + private boolean quickstart; + + public boolean getQuickstart() { + return quickstart; + } + + public static CliOptions parse(String[] args) throws IOException { + + SingleCommand optionsParser = SingleCommand.singleCommand(CliOptions.class); + + // If just a help flag is given, an exception will be thrown due to missing required options; + // hence, this workaround + for (String arg : args) { + if ("--help".equals(arg) || "-h".equals(arg)) { + Help.help(optionsParser.getCommandMetadata()); + return null; + } + } + + try { + return optionsParser.parse(args); + } catch (ParseException exception) { + if (exception.getMessage() != null) { + System.err.println(exception.getMessage()); + } else { + System.err.println("Options parsing failed for an unknown reason"); + } + System.err.println("See the -h or --help flags for usage information"); + return null; + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java new file mode 100644 index 000000000000..90ea94c9c701 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java @@ -0,0 +1,319 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.server; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.base.JsonParseExceptionMapper; +import io.confluent.kafka.serializers.KafkaJsonDeserializer; +import io.confluent.kafka.serializers.KafkaJsonDeserializerConfig; +import io.confluent.kafka.serializers.KafkaJsonSerializer; +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.ddl.commands.CreateStreamCommand; +import io.confluent.ksql.ddl.commands.RegisterTopicCommand; +import io.confluent.ksql.exception.KafkaTopicException; +import io.confluent.ksql.parser.tree.CreateStream; +import io.confluent.ksql.parser.tree.RegisterTopic; +import io.confluent.ksql.parser.tree.Expression; +import io.confluent.ksql.parser.tree.QualifiedName; +import io.confluent.ksql.parser.tree.StringLiteral; +import io.confluent.ksql.parser.tree.TableElement; +import io.confluent.ksql.rest.entity.SchemaMapper; +import io.confluent.ksql.rest.entity.ServerInfo; +import io.confluent.ksql.rest.server.computation.Command; +import io.confluent.ksql.rest.server.computation.CommandId; +import io.confluent.ksql.rest.server.computation.CommandIdAssigner; +import io.confluent.ksql.rest.server.computation.CommandRunner; +import io.confluent.ksql.rest.server.computation.CommandStore; +import io.confluent.ksql.rest.server.computation.StatementExecutor; +import io.confluent.ksql.rest.server.resources.KsqlExceptionMapper; +import io.confluent.ksql.rest.server.resources.KsqlResource; +import io.confluent.ksql.rest.server.resources.StatusResource; +import io.confluent.ksql.rest.server.resources.ServerInfoResource; +import io.confluent.ksql.rest.server.resources.streaming.StreamedQueryResource; +import io.confluent.ksql.util.KafkaTopicClient; +import io.confluent.ksql.util.KafkaTopicClientImpl; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.Version; +import io.confluent.rest.Application; +import io.confluent.rest.validation.JacksonMessageBodyProvider; + +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serializer; +import org.eclipse.jetty.util.resource.Resource; +import org.eclipse.jetty.util.resource.ResourceCollection; +import org.glassfish.jersey.server.ServerProperties; +import org.glassfish.jersey.servlet.ServletProperties; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.core.Configurable; +import java.io.FileInputStream; +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +public class KsqlRestApplication extends Application { + + private static final Logger log = LoggerFactory.getLogger(KsqlRestApplication.class); + + public static final String COMMANDS_KSQL_TOPIC_NAME = "__KSQL_COMMANDS_TOPIC"; + public static final String COMMANDS_STREAM_NAME = "KSQL_COMMANDS"; + + private final KsqlEngine ksqlEngine; + private final CommandRunner commandRunner; + private final ServerInfoResource serverInfoResource; + private final StatusResource statusResource; + private final StreamedQueryResource streamedQueryResource; + private final KsqlResource ksqlResource; + private final boolean enableQuickstartPage; + + private final Thread commandRunnerThread; + + public static String getCommandsKsqlTopicName() { + return COMMANDS_KSQL_TOPIC_NAME; + 
} + + public static String getCommandsStreamName() { + return COMMANDS_STREAM_NAME; + } + + public KsqlRestApplication( + KsqlEngine ksqlEngine, + KsqlRestConfig config, + CommandRunner commandRunner, + ServerInfoResource serverInfoResource, + StatusResource statusResource, + StreamedQueryResource streamedQueryResource, + KsqlResource ksqlResource, + boolean enableQuickstartPage + ) { + super(config); + this.ksqlEngine = ksqlEngine; + this.commandRunner = commandRunner; + this.serverInfoResource = serverInfoResource; + this.statusResource = statusResource; + this.streamedQueryResource = streamedQueryResource; + this.ksqlResource = ksqlResource; + this.enableQuickstartPage = enableQuickstartPage; + + this.commandRunnerThread = new Thread(commandRunner); + } + + @Override + public void setupResources(Configurable config, KsqlRestConfig appConfig) { + config.register(serverInfoResource); + config.register(statusResource); + config.register(ksqlResource); + config.register(streamedQueryResource); + config.register(new KsqlExceptionMapper()); + } + + @Override + public ResourceCollection getStaticResources() { + if (enableQuickstartPage) { + return new ResourceCollection(Resource.newClassPathResource("/io/confluent/ksql/rest/")); + } else { + return super.getStaticResources(); + } + } + + private static Properties getProps(String propsFile) throws IOException { + Properties result = new Properties(); + result.load(new FileInputStream(propsFile)); + return result; + } + + @Override + public void start() throws Exception { + super.start(); + commandRunnerThread.start(); + } + + @Override + public void stop() throws Exception { + ksqlEngine.close(); + commandRunner.close(); + try { + commandRunnerThread.join(); + } catch (InterruptedException exception) { + log.error("Interrupted while waiting for CommandRunner thread to complete", exception); + } + super.stop(); + } + + @Override + public void configureBaseApplication(Configurable config, Map metricTags) { + // Would call this but it registers additional, unwanted exception mappers + // super.configureBaseApplication(config, metricTags); + // Instead, just copy+paste the desired parts from Application.configureBaseApplication() here: + ObjectMapper jsonMapper = getJsonMapper(); + new SchemaMapper().registerToObjectMapper(jsonMapper); + + JacksonMessageBodyProvider jsonProvider = new JacksonMessageBodyProvider(jsonMapper); + config.register(jsonProvider); + config.register(JsonParseExceptionMapper.class); + + // Don't want to buffer rows when streaming JSON in a request to the query resource + config.property(ServerProperties.OUTBOUND_CONTENT_LENGTH_BUFFER, 0); + if (enableQuickstartPage) { + config.property(ServletProperties.FILTER_STATIC_CONTENT_REGEX, "^/quickstart\\.html$"); + } + } + + public static void main(String[] args) throws Exception { + CliOptions cliOptions = CliOptions.parse(args); + if (cliOptions == null) { + return; + } + + KsqlRestConfig restConfig = new KsqlRestConfig(getProps(cliOptions.getPropertiesFile())); + KsqlRestApplication app = buildApplication(restConfig, cliOptions.getQuickstart()); + + log.info("Starting server"); + app.start(); + log.info("Server up and running"); + app.join(); + log.info("Server shutting down"); + } + + public static KsqlRestApplication buildApplication(KsqlRestConfig restConfig, boolean quickstart) + throws Exception { + + Map ksqlConfProperties = new HashMap<>(); + ksqlConfProperties.putAll(restConfig.getCommandConsumerProperties()); + 
ksqlConfProperties.putAll(restConfig.getCommandProducerProperties()); + ksqlConfProperties.putAll(restConfig.getKsqlStreamsProperties()); + ksqlConfProperties.putAll(restConfig.getOriginals()); + + KsqlConfig ksqlConfig = new KsqlConfig(ksqlConfProperties); + KsqlEngine ksqlEngine = new KsqlEngine(ksqlConfig, new KafkaTopicClientImpl(ksqlConfig)); + KafkaTopicClient client = ksqlEngine.getKafkaTopicClient(); + + String commandTopic = restConfig.getCommandTopic(); + + try { + client.createTopic(commandTopic, 1, (short) 1); + } catch (KafkaTopicException e) { + log.info("Command Topic Exists: " + e.getMessage()); + } + + Map commandTopicProperties = new HashMap<>(); + commandTopicProperties.put( + DdlConfig.VALUE_FORMAT_PROPERTY, + new StringLiteral("json") + ); + commandTopicProperties.put( + DdlConfig.KAFKA_TOPIC_NAME_PROPERTY, + new StringLiteral(commandTopic) + ); + + ksqlEngine.getDDLCommandExec().execute(new RegisterTopicCommand(new RegisterTopic( + QualifiedName.of(COMMANDS_KSQL_TOPIC_NAME), + false, + commandTopicProperties))); + + ksqlEngine.getDDLCommandExec().execute(new CreateStreamCommand(new CreateStream( + QualifiedName.of(COMMANDS_STREAM_NAME), + Collections.singletonList(new TableElement("STATEMENT", "STRING")), + false, + Collections.singletonMap( + DdlConfig.TOPIC_NAME_PROPERTY, + new StringLiteral(COMMANDS_KSQL_TOPIC_NAME) + )), Collections.emptyMap(), ksqlEngine.getKafkaTopicClient())); + + Map commandConsumerProperties = restConfig.getCommandConsumerProperties(); + KafkaConsumer commandConsumer = new KafkaConsumer<>( + commandConsumerProperties, + getJsonDeserializer(CommandId.class, true), + getJsonDeserializer(Command.class, false) + ); + + KafkaProducer commandProducer = new KafkaProducer<>( + restConfig.getCommandProducerProperties(), + getJsonSerializer(true), + getJsonSerializer(false) + ); + + CommandStore commandStore = new CommandStore( + commandTopic, + commandConsumer, + commandProducer, + new CommandIdAssigner(ksqlEngine.getMetaStore()) + ); + + StatementParser statementParser = new StatementParser(ksqlEngine); + + StatementExecutor statementExecutor = new StatementExecutor( + ksqlEngine, + statementParser + ); + + CommandRunner commandRunner = new CommandRunner( + statementExecutor, + commandStore + ); + + ServerInfoResource serverInfoResource = + new ServerInfoResource(new ServerInfo(Version.getVersion())); + StatusResource statusResource = new StatusResource(statementExecutor); + StreamedQueryResource streamedQueryResource = new StreamedQueryResource( + ksqlEngine, + statementParser, + restConfig.getLong(KsqlRestConfig.STREAMED_QUERY_DISCONNECT_CHECK_MS_CONFIG) + ); + KsqlResource ksqlResource = new KsqlResource( + ksqlEngine, + commandStore, + statementExecutor, + restConfig.getLong(KsqlRestConfig.DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG) + ); + + commandRunner.processPriorCommands(); + + return new KsqlRestApplication( + ksqlEngine, + restConfig, + commandRunner, + serverInfoResource, + statusResource, + streamedQueryResource, + ksqlResource, + quickstart + ); + } + + private static Serializer getJsonSerializer(boolean isKey) { + Serializer result = new KafkaJsonSerializer<>(); + result.configure(Collections.emptyMap(), isKey); + return result; + } + + private static Deserializer getJsonDeserializer(Class classs, boolean isKey) { + Deserializer result = new KafkaJsonDeserializer<>(); + String typeConfigProperty = isKey + ? 
KafkaJsonDeserializerConfig.JSON_KEY_TYPE + : KafkaJsonDeserializerConfig.JSON_VALUE_TYPE; + + Map props = Collections.singletonMap( + typeConfigProperty, + classs + ); + result.configure(props, isKey); + return result; + } + + public KsqlEngine getKsqlEngine() { + return ksqlEngine; + } +} + +/* + TODO: Find a good, forwards-compatible use for the root resource + */ diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java new file mode 100644 index 000000000000..60c68a92c660 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java @@ -0,0 +1,122 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server; + +import io.confluent.common.config.ConfigDef; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.rest.RestConfig; + +import java.util.Map; + +// Although it would be nice to somehow extend the functionality of this class to encompass that of +// the KsqlConfig, there is no clean way to do so since the KsqlConfig inherits from the Kafka +// AbstractConfig class, and the RestConfig inherits from the Confluent AbstractConfig class. Making +// the two get along and play nicely together in one class is more work than it's worth, so any and +// all validation to be performed by the KsqlConfig class will be handled outside of this one. +public class KsqlRestConfig extends RestConfig { + + public static final String KSQL_STREAMS_PREFIX = "ksql.core.streams."; + public static final String COMMAND_CONSUMER_PREFIX = "ksql.command.consumer."; + public static final String COMMAND_PRODUCER_PREFIX = "ksql.command.producer."; + + public static final String + COMMAND_TOPIC_SUFFIX_CONFIG = "ksql.command.topic.suffix"; + public static final ConfigDef.Type + COMMAND_TOPIC_SUFFIX_TYPE = ConfigDef.Type.STRING; + public static final String + COMMAND_TOPIC_SUFFIX_DEFAULT = "commands"; + public static final ConfigDef.Importance + COMMAND_TOPIC_SUFFIX_IMPORTANCE = ConfigDef.Importance.LOW; + public static final String + COMMAND_TOPIC_SUFFIX_DOC = + "A suffix to append to the end of the name of the Kafka topic to use for distributing " + + "commands"; + + public static final String + STREAMED_QUERY_DISCONNECT_CHECK_MS_CONFIG = "query.stream.disconnect.check"; + public static final ConfigDef.Type + STREAMED_QUERY_DISCONNECT_CHECK_MS_TYPE = ConfigDef.Type.LONG; + public static final Long + STREAMED_QUERY_DISCONNECT_CHECK_MS_DEFAULT = 1000L; + public static final ConfigDef.Importance + STREAMED_QUERY_DISCONNECT_CHECK_MS_IMPORTANCE = ConfigDef.Importance.LOW; + public static final String + STREAMED_QUERY_DISCONNECT_CHECK_MS_DOC = + "How often to send an empty line as part of the response while streaming queries as " + + "JSON; this helps proactively determine if the connection has been terminated in " + + "order to avoid keeping the created streams job alive longer than necessary"; + + public static final String + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG = "command.response.timeout.ms"; + public static final ConfigDef.Type + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_TYPE = ConfigDef.Type.LONG; + public static final Long + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_DEFAULT = 1000L; + public static final ConfigDef.Importance + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_IMPORTANCE = ConfigDef.Importance.LOW; + public static final String + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_DOC = + "How long to wait for a distributed 
command to be executed by the local node before " + + "returning a response"; + + private static final ConfigDef CONFIG_DEF; + + static { + CONFIG_DEF = baseConfigDef().define( + COMMAND_TOPIC_SUFFIX_CONFIG, + COMMAND_TOPIC_SUFFIX_TYPE, + COMMAND_TOPIC_SUFFIX_DEFAULT, + COMMAND_TOPIC_SUFFIX_IMPORTANCE, + COMMAND_TOPIC_SUFFIX_DOC + ).define( + STREAMED_QUERY_DISCONNECT_CHECK_MS_CONFIG, + STREAMED_QUERY_DISCONNECT_CHECK_MS_TYPE, + STREAMED_QUERY_DISCONNECT_CHECK_MS_DEFAULT, + STREAMED_QUERY_DISCONNECT_CHECK_MS_IMPORTANCE, + STREAMED_QUERY_DISCONNECT_CHECK_MS_DOC + ).define( + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG, + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_TYPE, + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_DEFAULT, + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_IMPORTANCE, + DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_DOC + ); + } + + public KsqlRestConfig(Map props) { + super(CONFIG_DEF, props); + } + + // Bit of a hack to get around the fact that RestConfig.originals() is private for some reason + public Map getOriginals() { + return originalsWithPrefix(""); + } + + private Map getPropertiesWithOverrides(String prefix) { + Map result = getOriginals(); + result.putAll(originalsWithPrefix(prefix)); + return result; + } + + public Map getCommandConsumerProperties() { + return getPropertiesWithOverrides(COMMAND_CONSUMER_PREFIX); + } + + public Map getCommandProducerProperties() { + return getPropertiesWithOverrides(COMMAND_PRODUCER_PREFIX); + } + + public Map getKsqlStreamsProperties() { + return getPropertiesWithOverrides(KSQL_STREAMS_PREFIX); + } + + public String getCommandTopic() { + return String.format( + "%s_%s", + KsqlConfig.KSQL_SERVICE_ID_DEFAULT, + getString(COMMAND_TOPIC_SUFFIX_CONFIG) + ); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StatementParser.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StatementParser.java new file mode 100644 index 000000000000..93d35868e657 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StatementParser.java @@ -0,0 +1,31 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server; + +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.parser.tree.Statement; + +import java.util.List; + +public class StatementParser { + private final KsqlEngine ksqlEngine; + + public StatementParser(KsqlEngine ksqlEngine) { + this.ksqlEngine = ksqlEngine; + } + + public Statement parseSingleStatement(String statementString) throws Exception { + List statements = ksqlEngine.getStatements(statementString); + if (statements == null) { + throw new IllegalArgumentException("Call to KsqlEngine.getStatements() returned null"); + } else if ((statements.size() != 1)) { + throw new IllegalArgumentException( + String.format("Expected exactly one KSQL statement; found %d instead", statements.size()) + ); + } else { + return statements.get(0); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/Command.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/Command.java new file mode 100644 index 000000000000..4de024727547 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/Command.java @@ -0,0 +1,52 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/
+
+package io.confluent.ksql.rest.server.computation;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Objects;
+
+public class Command {
+  private final String statement;
+  private final Map<String, Object> streamsProperties;
+
+  @JsonCreator
+  public Command(
+      @JsonProperty("statement") String statement,
+      @JsonProperty("streamsProperties") Map<String, Object> streamsProperties
+  ) {
+    this.statement = statement;
+    this.streamsProperties = streamsProperties;
+  }
+
+  public String getStatement() {
+    return statement;
+  }
+
+  public Map<String, Object> getStreamsProperties() {
+    return new HashMap<>(streamsProperties);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof Command)) {
+      return false;
+    }
+    Command command = (Command) o;
+    return Objects.equals(getStatement(), command.getStatement())
+        && Objects.equals(getStreamsProperties(), command.getStreamsProperties());
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(getStatement(), getStreamsProperties());
+  }
+}
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandId.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandId.java
new file mode 100644
index 000000000000..5f7fb0c84e71
--- /dev/null
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandId.java
@@ -0,0 +1,72 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/
+
+package io.confluent.ksql.rest.server.computation;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+import java.util.Objects;
+
+public class CommandId {
+  private final Type type;
+  private final String entity;
+
+  public enum Type {
+    TOPIC,
+    STREAM,
+    TABLE,
+    TERMINATE
+  }
+
+  public CommandId(Type type, String entity) {
+    this.type = type;
+    this.entity = entity;
+  }
+
+  public CommandId(String type, String entity) {
+    this(Type.valueOf(type.toUpperCase()), entity);
+  }
+
+  @JsonCreator
+  public static CommandId fromString(String fromString) {
+    String[] splitOnSlash = fromString.split("/", 2);
+    if (splitOnSlash.length != 2) {
+      throw new IllegalArgumentException("Expected a string of the form <type>/<entity>");
+    }
+    return new CommandId(splitOnSlash[0], splitOnSlash[1]);
+  }
+
+  public Type getType() {
+    return type;
+  }
+
+  public String getEntity() {
+    return entity;
+  }
+
+  @Override
+  @JsonValue
+  public String toString() {
+    return String.format("%s/%s", type.toString().toLowerCase(), entity);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof CommandId)) {
+      return false;
+    }
+    CommandId commandId = (CommandId) o;
+    return getType() == commandId.getType()
+        && Objects.equals(getEntity(), commandId.getEntity());
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(getType(), getEntity());
+  }
+}
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandIdAssigner.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandIdAssigner.java
new file mode 100644
index 000000000000..d9fe0cf9022f
--- /dev/null
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandIdAssigner.java
@@ -0,0 +1,114 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ **/ + +package io.confluent.ksql.rest.server.computation; + +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.tree.CreateStream; +import io.confluent.ksql.parser.tree.CreateStreamAsSelect; +import io.confluent.ksql.parser.tree.CreateTable; +import io.confluent.ksql.parser.tree.CreateTableAsSelect; +import io.confluent.ksql.parser.tree.RunScript; +import io.confluent.ksql.parser.tree.RegisterTopic; +import io.confluent.ksql.parser.tree.DropStream; +import io.confluent.ksql.parser.tree.DropTable; +import io.confluent.ksql.parser.tree.DropTopic; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.parser.tree.TerminateQuery; + +public class CommandIdAssigner { + + private final MetaStore metaStore; + + public CommandIdAssigner(MetaStore metaStore) { + this.metaStore = metaStore; + } + + public CommandId getCommandId(Statement command) { + if (command instanceof RegisterTopic) { + return getTopicCommandId((RegisterTopic) command); + } else if (command instanceof CreateStream) { + return getTopicStreamCommandId((CreateStream) command); + } else if (command instanceof CreateTable) { + return getTopicTableCommandId((CreateTable) command); + } else if (command instanceof CreateStreamAsSelect) { + return getSelectStreamCommandId((CreateStreamAsSelect) command); + } else if (command instanceof CreateTableAsSelect) { + return getSelectTableCommandId((CreateTableAsSelect) command); + } else if (command instanceof TerminateQuery) { + return getTerminateCommandId((TerminateQuery) command); + } else if (command instanceof DropTopic) { + return getDropTopicCommandId((DropTopic) command); + } else if (command instanceof DropStream) { + return getDropStreamCommandId((DropStream) command); + } else if (command instanceof DropTable) { + return getDropTableCommandId((DropTable) command); + } else if (command instanceof RunScript) { + return new CommandId(CommandId.Type.STREAM, "RunScript"); + } else { + throw new RuntimeException(String.format( + "Cannot assign command ID to statement of type %s", + command.getClass().getCanonicalName() + )); + } + } + + public CommandId getTopicCommandId(RegisterTopic registerTopic) { + String topicName = registerTopic.getName().toString(); + if (metaStore.getAllTopicNames().contains(topicName)) { + throw new RuntimeException(String.format("Topic %s already exists", topicName)); + } + return new CommandId(CommandId.Type.TOPIC, topicName); + } + + public CommandId getTopicStreamCommandId(CreateStream createStream) { + return getStreamCommandId(createStream.getName().toString()); + } + + public CommandId getSelectStreamCommandId(CreateStreamAsSelect createStreamAsSelect) { + return getStreamCommandId(createStreamAsSelect.getName().toString()); + } + + public CommandId getTopicTableCommandId(CreateTable createTable) { + return getTableCommandId(createTable.getName().toString()); + } + + public CommandId getSelectTableCommandId(CreateTableAsSelect createTableAsSelect) { + return getTableCommandId(createTableAsSelect.getName().toString()); + } + + public CommandId getTerminateCommandId(TerminateQuery terminateQuery) { + return new CommandId(CommandId.Type.TERMINATE, Long.toString(terminateQuery.getQueryId())); + } + + public CommandId getDropTopicCommandId(DropTopic dropTopicQuery) { + return new CommandId(CommandId.Type.TOPIC, + dropTopicQuery.getTopicName().getSuffix() + "_DROP"); + } + + public CommandId getDropStreamCommandId(DropStream dropStreamQuery) { + return new CommandId(CommandId.Type.STREAM, + 
dropStreamQuery.getName().getSuffix() + "_DROP"); + } + + public CommandId getDropTableCommandId(DropTable dropTableQuery) { + return new CommandId(CommandId.Type.TABLE, + dropTableQuery.getName().getSuffix() + "_DROP"); + } + + private CommandId getStreamCommandId(String streamName) { + return getSourceCommandId(CommandId.Type.STREAM, streamName); + } + + private CommandId getTableCommandId(String tableName) { + return getSourceCommandId(CommandId.Type.TABLE, tableName); + } + + private CommandId getSourceCommandId(CommandId.Type type, String sourceName) { + if (metaStore.getAllStructuredDataSourceNames().contains(sourceName)) { + throw new RuntimeException(String.format("Source %s already exists", sourceName)); + } + return new CommandId(type, sourceName); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandRunner.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandRunner.java new file mode 100644 index 000000000000..d154d5510e2c --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandRunner.java @@ -0,0 +1,107 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server.computation; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.common.errors.WakeupException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.confluent.ksql.util.Pair; + +/** + * Handles the logic of reading distributed commands, including pre-existing commands that were + * issued before being initialized, and then delegating their execution to a + * {@link StatementExecutor}. Also responsible for taking care of any exceptions that occur in the + * process. + */ +public class CommandRunner implements Runnable, Closeable { + + private static final Logger log = LoggerFactory.getLogger(CommandRunner.class); + + private final StatementExecutor statementExecutor; + private final CommandStore commandStore; + private final AtomicBoolean closed; + + public CommandRunner( + StatementExecutor statementExecutor, + CommandStore commandStore + ) { + this.statementExecutor = statementExecutor; + this.commandStore = commandStore; + + closed = new AtomicBoolean(false); + } + + /** + * Begin a continuous poll-execute loop for the command topic, stopping only if either a + * {@link WakeupException} is thrown or the {@link #close()} method is called. + */ + @Override + public void run() { + try { + while (!closed.get()) { + log.debug("Polling for new writes to command topic"); + ConsumerRecords records = commandStore.getNewCommands(); + log.debug("Found {} new writes to command topic", records.count()); + for (ConsumerRecord record : records) { + CommandId commandId = record.key(); + Command command = record.value(); + if (command.getStatement() != null) { + executeStatement(command, commandId); + } else { + log.debug("Skipping null statement for ID {}", commandId); + } + } + } + } catch (WakeupException wue) { + if (!closed.get()) { + throw wue; + } + } + } + + /** + * Halt the poll-execute loop. 
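+   * <p>Calling {@link CommandStore#close()} wakes up the consumer, which unblocks any
+   * poll in progress, so a runner blocked inside {@link #run()} exits promptly. A minimal
+   * shutdown sketch (illustrative caller code, not part of this class):
+   * <pre>
+   *   CommandRunner runner = new CommandRunner(statementExecutor, commandStore);
+   *   Thread runnerThread = new Thread(runner);
+   *   runnerThread.start();
+   *   // ... later, during server shutdown:
+   *   runner.close();       // sets the closed flag and wakes the blocked poll
+   *   runnerThread.join();  // run() swallows the resulting WakeupException once closed
+   * </pre>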
+ */ + @Override + public void close() { + closed.set(true); + commandStore.close(); + } + + /** + * Read and execute all commands on the command topic, starting at the earliest offset. + * @throws Exception TODO: Refine this. + */ + public void processPriorCommands() throws Exception { + List> priorCommands = commandStore.getPriorCommands(); + statementExecutor.handleStatements(priorCommands); + } + + private void executeStatement(Command command, CommandId commandId) { + log.info("Executing statement: " + command.getStatement()); + try { + statementExecutor.handleStatement(command, commandId); + } catch (WakeupException wue) { + throw wue; + } catch (Exception exception) { + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stringWriter); + exception.printStackTrace(printWriter); + log.error("Exception encountered during poll-parse-execute loop: " + stringWriter.toString()); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandStore.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandStore.java new file mode 100644 index 000000000000..d970742eacce --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandStore.java @@ -0,0 +1,199 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server.computation; + +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.util.Pair; + +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.TopicPartition; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Wrapper class for the command topic. Used for reading from the topic (either all messages from + * the beginning until now, or any new messages since then), and writing to it. 
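+ * <p>The store manually assigns itself partition 0 of the command topic, so it assumes a
+ * single-partition topic (consistent with the client.createTopic(commandTopic, 1, (short) 1)
+ * call in KsqlRestApplication.buildApplication()).
+ *
+ * <p>Typical write path (illustrative sketch; inputs are assumed, not taken from this patch):
+ * <pre>
+ *   CommandId id = commandStore.distributeStatement(
+ *       "CREATE STREAM pageviews ...;", parsedStatement, Collections.emptyMap());
+ *   // blocks until the record is acknowledged by Kafka, then returns the assigned ID
+ * </pre>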
+ */ +public class CommandStore implements Closeable { + + private static final Logger log = LoggerFactory.getLogger(CommandStore.class); + + private final String commandTopic; + private final Consumer commandConsumer; + private final Producer commandProducer; + private final CommandIdAssigner commandIdAssigner; + private final AtomicBoolean closed; + + public CommandStore( + String commandTopic, + Consumer commandConsumer, + Producer commandProducer, + CommandIdAssigner commandIdAssigner + ) { + this.commandTopic = commandTopic; + // TODO: Remove commandConsumer/commandProducer as parameters if not needed in testing + this.commandConsumer = commandConsumer; + this.commandProducer = commandProducer; + this.commandIdAssigner = commandIdAssigner; + + commandConsumer.assign(Collections.singleton(new TopicPartition(commandTopic, 0))); + + closed = new AtomicBoolean(false); + } + + /** + * Close the store, rendering it unable to read or write commands + */ + @Override + public void close() { + closed.set(true); + commandConsumer.wakeup(); + commandProducer.close(); + } + + /** + * Write the given statement to the command topic, to be read by all nodes in the current cluster. + * Does not return until the statement has been successfully written, or an exception is thrown. + * @param statementString The string of the statement to be distributed + * @param statement The statement to be distributed + * @param streamsProperties Any command-specific Streams properties to use. + * @return The ID assigned to the statement + * @throws Exception TODO: Refine this + */ + public CommandId distributeStatement( + String statementString, + Statement statement, + Map streamsProperties + ) throws Exception { + CommandId commandId = commandIdAssigner.getCommandId(statement); + Command command = new Command(statementString, streamsProperties); + commandProducer.send(new ProducerRecord<>(commandTopic, commandId, command)).get(); + return commandId; + } + + /** + * Poll for new commands, blocking until at least one is available. + * @return The commands that have been polled from the command topic + */ + public ConsumerRecords getNewCommands() { + return commandConsumer.poll(Long.MAX_VALUE); + } + + /** + * Collect all commands that have been written to the command topic, starting at the earliest + * offset and proceeding until it appears that all have been returned. + * @return The commands that have been read from the command topic + */ + public List> getPriorCommands() { + List> result = new ArrayList<>(); + for (ConsumerRecord commandRecord : getAllPriorCommandRecords()) { + CommandId commandId = commandRecord.key(); + Command command = commandRecord.value(); + if (command != null) { + result.add(new Pair<>(commandId, command)); + } + } + + return result; + } + + private List> getAllPriorCommandRecords() { + Collection commandTopicPartitions = getTopicPartitionsForTopic(commandTopic); + + // Have to poll to make sure subscription has taken effect (subscribe() is lazy) + commandConsumer.poll(0); + commandConsumer.seekToBeginning(commandTopicPartitions); + + Map currentOffsets = new HashMap<>(); + + List> result = new ArrayList<>(); + log.debug("Polling end offset(s) for command topic"); + Map endOffsets = commandConsumer.endOffsets(commandTopicPartitions); + // Only want to poll for end offsets at the very beginning, and when we think we may be caught + // up. 
So, this outer loop tries to catch up (via the inner loop), then when it believes it has + // (signalled by having exited the inner loop), end offsets are polled again and another check + // is performed to see if anything new has been written (which would be signalled by the end + // offsets having changed). If something new has been written, the outer loop is repeated; if + // not, we're caught up to the end offsets we just polled and can continue. + do { + while (!offsetsCaughtUp(currentOffsets, endOffsets)) { + log.debug("Polling for prior command records"); + ConsumerRecords records = commandConsumer.poll(30000); + if (records.isEmpty()) { + log.warn("No records received after 30 seconds of polling; something may be wrong"); + } else { + log.debug("Received {} records from poll", records.count()); + for (ConsumerRecord record : records) { + result.add(record); + TopicPartition recordTopicPartition = + new TopicPartition(record.topic(), record.partition()); + Long currentOffset = currentOffsets.get(recordTopicPartition); + if (currentOffset == null || currentOffset < record.offset()) { + currentOffsets.put(recordTopicPartition, record.offset()); + } + } + } + } + log.debug("Polling end offset(s) for command topic"); + endOffsets = commandConsumer.endOffsets(commandTopicPartitions); + } while (!offsetsCaughtUp(currentOffsets, endOffsets)); + return result; + } + + private Collection getTopicPartitionsForTopic(String topic) { + List partitionInfoList = commandConsumer.partitionsFor(topic); + + Collection result = new HashSet<>(); + for (PartitionInfo partitionInfo : partitionInfoList) { + result.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())); + } + + return result; + } + + private boolean offsetsCaughtUp( + Map offsets, + Map endOffsets + ) { + log.debug("Checking to see if consumed command records are caught up with end offset(s)"); + for (Map.Entry endOffset : endOffsets.entrySet()) { + long offset = offsets.getOrDefault(endOffset.getKey(), 0L); + /* + From https://kafka.apache.org/0101/javadoc/index.html?org/apache/kafka/streams/kstream/KTable.html + "The last offset of a partition is the offset of the upcoming message, + i.e. the offset of the last available message + 1" + Hence, "offset + 1" instead of just "offset" + */ + if (offset + 1 < endOffset.getValue()) { + log.debug( + "Consumed command records are not yet caught up with offset for partition {}; " + + "end offset is {}, but last consumed offset is {}", + endOffset.getKey().partition(), + endOffset.getValue(), + offset + ); + return false; + } + } + log.debug("Consumed command records are caught up with end offset(s)"); + return true; + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/StatementExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/StatementExecutor.java new file mode 100644 index 000000000000..fc4567a4ef7b --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/StatementExecutor.java @@ -0,0 +1,449 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.server.computation; + +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.ddl.commands.*; +import io.confluent.ksql.exception.ExceptionUtil; +import io.confluent.ksql.metastore.DataSource; +import io.confluent.ksql.parser.tree.CreateStream; +import io.confluent.ksql.parser.tree.CreateStreamAsSelect; +import io.confluent.ksql.parser.tree.CreateTable; +import io.confluent.ksql.parser.tree.CreateTableAsSelect; +import io.confluent.ksql.parser.tree.RunScript; +import io.confluent.ksql.parser.tree.RegisterTopic; +import io.confluent.ksql.parser.tree.DropStream; +import io.confluent.ksql.parser.tree.DropTable; +import io.confluent.ksql.parser.tree.DropTopic; +import io.confluent.ksql.parser.tree.Query; +import io.confluent.ksql.parser.tree.QuerySpecification; +import io.confluent.ksql.parser.tree.Relation; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.parser.tree.Table; +import io.confluent.ksql.parser.tree.TerminateQuery; +import io.confluent.ksql.planner.plan.KsqlStructuredDataOutputNode; +import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.server.StatementParser; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.Pair; +import io.confluent.ksql.util.PersistentQueryMetadata; +import io.confluent.ksql.util.QueryMetadata; +import org.apache.kafka.common.errors.WakeupException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Handles the actual execution (or delegation to KSQL core) of all distributed statements, as well + * as tracking their statuses as things move along. + */ +public class StatementExecutor { + + private static final Logger log = LoggerFactory.getLogger(StatementExecutor.class); + + private static final Pattern TERMINATE_PATTERN = + Pattern.compile("\\s*TERMINATE\\s+([0-9]+)\\s*;?\\s*"); + + private final KsqlEngine ksqlEngine; + private final StatementParser statementParser; + private final Map statusStore; + private final Map statusFutures; + + public StatementExecutor( + KsqlEngine ksqlEngine, + StatementParser statementParser + ) { + this.ksqlEngine = ksqlEngine; + this.statementParser = statementParser; + + this.statusStore = new HashMap<>(); + this.statusFutures = new HashMap<>(); + } + + public void handleStatements(List> priorCommands) throws Exception { + for (Pair commandIdCommandPair: priorCommands) { + log.info("Executing prior statement: '{}'", commandIdCommandPair.getRight()); + try { + handleStatementWithTerminatedQueries( + commandIdCommandPair.getRight(), + commandIdCommandPair.getLeft(), + Collections.emptyMap() + ); + } catch (Exception exception) { + log.warn("Failed to execute statement due to exception", exception); + } + } + } + + /** + * Attempt to execute a single statement. + * @param command The string containing the statement to be executed + * @param commandId The ID to be used to track the status of the command + * @throws Exception TODO: Refine this. 
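+   * <p>Note: {@code command} is the Command object wrapping the statement text. This method
+   * delegates with a null terminated-query map; startQuery() treats null as "no queries have
+   * been terminated" and always starts the resulting streams job.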
+ */ + public void handleStatement( + Command command, + CommandId commandId + ) throws Exception { + handleStatementWithTerminatedQueries(command, commandId, null); + } + + /** + * Get details on the statuses of all the statements handled thus far. + * @return A map detailing the current statuses of all statements that the handler has executed + * (or attempted to execute). + */ + public Map getStatuses() { + return new HashMap<>(statusStore); + } + + /** + * @param statementId The ID of the statement to check the status of. + * @return Information on the status of the statement with the given ID, if one exists. + */ + public Optional getStatus(CommandId statementId) { + return Optional.ofNullable(statusStore.get(statementId)); + } + + /** + * Register the existence of a new statement that has been written to the command topic. All other + * statement status information is updated exclusively by the current {@link StatementExecutor} + * instance, but in the (unlikely but possible) event that a statement is written to the command + * topic but never picked up by this instance, it should be possible to know that it was at least + * written to the topic in the first place. + * @param commandId The ID of the statement that has been written to the command topic. + */ + public Future registerQueuedStatement(CommandId commandId) { + statusStore.put( + commandId, + new CommandStatus(CommandStatus.Status.QUEUED, "Statement written to command topic") + ); + + CommandStatusFuture result; + synchronized (statusFutures) { + result = statusFutures.get(commandId); + if (result != null) { + return result; + } else { + result = new CommandStatusFuture(commandId); + statusFutures.put(commandId, result); + return result; + } + } + } + + private void completeStatusFuture(CommandId commandId, CommandStatus commandStatus) { + synchronized (statusFutures) { + CommandStatusFuture statusFuture = statusFutures.get(commandId); + if (statusFuture != null) { + statusFuture.complete(commandStatus); + } else { + CommandStatusFuture newStatusFuture = new CommandStatusFuture(commandId); + newStatusFuture.complete(commandStatus); + statusFutures.put(commandId, newStatusFuture); + } + } + } + + private Map getTerminatedQueries(Map commands) { + Map result = new HashMap<>(); + + for (Map.Entry commandEntry : commands.entrySet()) { + CommandId commandId = commandEntry.getKey(); + String command = commandEntry.getValue().getStatement(); + Matcher terminateMatcher = TERMINATE_PATTERN.matcher(command.toUpperCase()); + if (terminateMatcher.matches()) { + Long queryId = Long.parseLong(terminateMatcher.group(1)); + result.put(queryId, commandId); + } + } + + return result; + } + + /** + * Attempt to execute a single statement. +// * @param statementString The string containing the statement to be executed + * @param command The string containing the statement to be executed + * @param commandId The ID to be used to track the status of the command + * @param terminatedQueries An optional map from terminated query IDs to the commands that + * requested their termination + * @throws Exception TODO: Refine this. 
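+   * <p>Note: WakeupException is rethrown so server shutdown can propagate; any other
+   * exception is captured as a stack-trace string and stored as an ERROR status, which
+   * also completes any status future waiting on the command.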
+ */ + private void handleStatementWithTerminatedQueries( + Command command, + CommandId commandId, + Map terminatedQueries + ) throws Exception { + try { + String statementString = command.getStatement(); + statusStore.put( + commandId, + new CommandStatus(CommandStatus.Status.PARSING, "Parsing statement") + ); + Statement statement = statementParser.parseSingleStatement(statementString); + statusStore.put( + commandId, + new CommandStatus(CommandStatus.Status.EXECUTING, "Executing statement") + ); + executeStatement(statement, command, commandId, terminatedQueries); + } catch (WakeupException exception) { + throw exception; + } catch (Exception exception) { + String stackTraceString = ExceptionUtil.stackTraceToString(exception); + log.error(stackTraceString); + CommandStatus errorStatus = new CommandStatus(CommandStatus.Status.ERROR, stackTraceString); + statusStore.put(commandId, errorStatus); + completeStatusFuture(commandId, errorStatus); + } + } + + private void executeStatement( + Statement statement, + Command command, + CommandId commandId, + Map terminatedQueries + ) throws Exception { + String statementStr = command.getStatement(); + + DDLCommandResult result = null; + String successMessage = ""; + + if (statement instanceof RegisterTopic + || statement instanceof CreateStream + || statement instanceof CreateTable + || statement instanceof DropTopic + || statement instanceof DropStream + || statement instanceof DropTable + ) { + result = + ksqlEngine.getQueryEngine().handleDdlStatement(statement, command.getStreamsProperties()); + } else if (statement instanceof CreateStreamAsSelect) { + CreateStreamAsSelect createStreamAsSelect = (CreateStreamAsSelect) statement; + QuerySpecification querySpecification = + (QuerySpecification) createStreamAsSelect.getQuery().getQueryBody(); + Query query = ksqlEngine.addInto( + createStreamAsSelect.getQuery(), + querySpecification, + createStreamAsSelect.getName().getSuffix(), + createStreamAsSelect.getProperties(), + createStreamAsSelect.getPartitionByColumn() + ); + if (startQuery(statementStr, query, commandId, terminatedQueries)) { + successMessage = "Stream created and running"; + } else { + return; + } + } else if (statement instanceof CreateTableAsSelect) { + CreateTableAsSelect createTableAsSelect = (CreateTableAsSelect) statement; + QuerySpecification querySpecification = + (QuerySpecification) createTableAsSelect.getQuery().getQueryBody(); + Query query = ksqlEngine.addInto( + createTableAsSelect.getQuery(), + querySpecification, + createTableAsSelect.getName().getSuffix(), + createTableAsSelect.getProperties(), + Optional.empty() + ); + if (startQuery(statementStr, query, commandId, terminatedQueries)) { + successMessage = "Table created and running"; + } else { + return; + } + } else if (statement instanceof TerminateQuery) { + terminateQuery((TerminateQuery) statement); + successMessage = "Query terminated."; + } else if (statement instanceof RunScript) { + if (command.getStreamsProperties().containsKey(DdlConfig.SCHEMA_FILE_CONTENT_PROPERTY)) { + String queries = + (String) command.getStreamsProperties().get(DdlConfig.SCHEMA_FILE_CONTENT_PROPERTY); + List queryMetadataList = ksqlEngine.buildMultipleQueries(false, queries, + command.getStreamsProperties()); + for (QueryMetadata queryMetadata : queryMetadataList) { + if (queryMetadata instanceof PersistentQueryMetadata) { + PersistentQueryMetadata persistentQueryMetadata = (PersistentQueryMetadata) queryMetadata; + persistentQueryMetadata.getKafkaStreams().start(); + } + } + } else 
{ + throw new KsqlException("No statements received for LOAD FROM FILE."); + } + + }else { + throw new Exception(String.format( + "Unexpected statement type: %s", + statement.getClass().getName() + )); + } + // TODO: change to unified return message + CommandStatus successStatus = new CommandStatus(CommandStatus.Status.SUCCESS, + result != null ? result.getMessage(): successMessage); + statusStore.put(commandId, successStatus); + completeStatusFuture(commandId, successStatus); + } + + private boolean startQuery( + String queryString, + Query query, + CommandId commandId, + Map terminatedQueries + ) throws Exception { + if (query.getQueryBody() instanceof QuerySpecification) { + QuerySpecification querySpecification = (QuerySpecification) query.getQueryBody(); + Optional into = querySpecification.getInto(); + if (into.isPresent() && into.get() instanceof Table) { + Table table = (Table) into.get(); + if (ksqlEngine.getMetaStore().getSource(table.getName().getSuffix()) != null) { + throw new Exception(String.format( + "Sink specified in INTO clause already exists: %s", + table.getName().getSuffix().toUpperCase() + )); + } + } + } + + QueryMetadata queryMetadata = ksqlEngine.buildMultipleQueries( + false, queryString, Collections.emptyMap()).get(0); + + if (queryMetadata instanceof PersistentQueryMetadata) { + PersistentQueryMetadata persistentQueryMetadata = (PersistentQueryMetadata) queryMetadata; + long queryId = persistentQueryMetadata.getId(); + + if (terminatedQueries != null && terminatedQueries.containsKey(queryId)) { + CommandId terminateId = terminatedQueries.get(queryId); + statusStore.put( + terminateId, + new CommandStatus(CommandStatus.Status.SUCCESS, "Termination request granted") + ); + statusStore.put( + commandId, + new CommandStatus(CommandStatus.Status.TERMINATED, "Query terminated") + ); + ksqlEngine.terminateQuery(queryId, false); + return false; + } else { + persistentQueryMetadata.getKafkaStreams().start(); + return true; + } + + } else { + throw new Exception(String.format( + "Unexpected query metadata type: %s; was expecting %s", + queryMetadata.getClass().getCanonicalName(), + PersistentQueryMetadata.class.getCanonicalName() + )); + } + } + + private void terminateQuery(TerminateQuery terminateQuery) throws Exception { + long queryId = terminateQuery.getQueryId(); + QueryMetadata queryMetadata = ksqlEngine.getPersistentQueries().get(queryId); + if (!ksqlEngine.terminateQuery(queryId, true)) { + throw new Exception(String.format("No running query with id %d was found", queryId)); + } + + CommandId.Type commandType; + DataSource.DataSourceType sourceType = + queryMetadata.getOutputNode().getTheSourceNode().getDataSourceType(); + switch (sourceType) { + case KTABLE: + commandType = CommandId.Type.TABLE; + break; + case KSTREAM: + commandType = CommandId.Type.STREAM; + break; + default: + throw new + Exception(String.format("Unexpected source type for running query: %s", sourceType)); + } + + String queryEntity = + ((KsqlStructuredDataOutputNode) queryMetadata.getOutputNode()).getKsqlTopic().getName(); + + CommandId queryStatementId = new CommandId(commandType, queryEntity); + statusStore.put( + queryStatementId, + new CommandStatus(CommandStatus.Status.TERMINATED, "Query terminated") + ); + } + + private class CommandStatusFuture implements Future { + + private final CommandId commandId; + private final AtomicReference result; + + public CommandStatusFuture(CommandId commandId) { + this.commandId = commandId; + this.result = new AtomicReference<>(null); + } + + 
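+    // Note: waiters synchronize on the AtomicReference itself and block in result.wait();
+    // complete() sets the value and calls notifyAll() under the same monitor, so a woken
+    // get() re-reads result.get() before returning or timing out.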
@Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; // TODO: Is an implementation of this method necessary? + } + + @Override + public boolean isCancelled() { + return false; // TODO: Is an implementation of this method necessary? + } + + @Override + public boolean isDone() { + return result.get() != null; + } + + @Override + public CommandStatus get() throws InterruptedException { + synchronized (result) { + while (result.get() == null) { + result.wait(); + } + removeFromFutures(); + return result.get(); + } + } + + @Override + public CommandStatus get(long timeout, TimeUnit unit) + throws InterruptedException, TimeoutException { + long endTimeMs = System.currentTimeMillis() + unit.toMillis(timeout); + synchronized (result) { + while (System.currentTimeMillis() < endTimeMs && result.get() == null) { + result.wait(Math.max(1, endTimeMs - System.currentTimeMillis())); + } + if (result.get() == null) { + throw new TimeoutException(); + } + removeFromFutures(); + return result.get(); + } + } + + private void complete(CommandStatus result) { + synchronized (this.result) { + this.result.set(result); + this.result.notifyAll(); + } + } + + private void removeFromFutures() { + synchronized (statusFutures) { + statusFutures.remove(commandId); + } + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlExceptionMapper.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlExceptionMapper.java new file mode 100644 index 000000000000..181184d15840 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlExceptionMapper.java @@ -0,0 +1,25 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server.resources; + +import io.confluent.ksql.rest.entity.ErrorMessage; + +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.ExceptionMapper; + +public class KsqlExceptionMapper implements ExceptionMapper { + + @Override + public Response toResponse(Throwable exception) { + // TODO: Distinguish between exceptions that warrant a stack trace and ones that don't + // TODO: Return actually meaningful status codes + return Response + .status(Response.Status.BAD_REQUEST) + .type(MediaType.APPLICATION_JSON_TYPE) + .entity(new ErrorMessage(exception)) + .build(); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java new file mode 100644 index 000000000000..d06396c5c7ed --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java @@ -0,0 +1,395 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.server.resources; + +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.ddl.commands.CreateStreamCommand; +import io.confluent.ksql.ddl.commands.CreateTableCommand; +import io.confluent.ksql.ddl.commands.DDLCommandExec; +import io.confluent.ksql.ddl.commands.DropSourceCommand; +import io.confluent.ksql.ddl.commands.DropTopicCommand; +import io.confluent.ksql.ddl.commands.RegisterTopicCommand; +import io.confluent.ksql.exception.ExceptionUtil; +import io.confluent.ksql.metastore.DataSource; +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.KsqlTable; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.metastore.StructuredDataSource; +import io.confluent.ksql.parser.KsqlParser; +import io.confluent.ksql.parser.SqlBaseParser; +import io.confluent.ksql.parser.tree.CreateStream; +import io.confluent.ksql.parser.tree.CreateStreamAsSelect; +import io.confluent.ksql.parser.tree.CreateTable; +import io.confluent.ksql.parser.tree.CreateTableAsSelect; +import io.confluent.ksql.parser.tree.ListTopics; +import io.confluent.ksql.parser.tree.RunScript; +import io.confluent.ksql.parser.tree.RegisterTopic; +import io.confluent.ksql.parser.tree.DropStream; +import io.confluent.ksql.parser.tree.DropTable; +import io.confluent.ksql.parser.tree.DropTopic; +import io.confluent.ksql.parser.tree.Explain; +import io.confluent.ksql.parser.tree.ListProperties; +import io.confluent.ksql.parser.tree.ListQueries; +import io.confluent.ksql.parser.tree.ListStreams; +import io.confluent.ksql.parser.tree.ListTables; +import io.confluent.ksql.parser.tree.ListRegisteredTopics; +import io.confluent.ksql.parser.tree.Query; +import io.confluent.ksql.parser.tree.ShowColumns; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.parser.tree.TerminateQuery; +import io.confluent.ksql.planner.plan.KsqlStructuredDataOutputNode; +import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.entity.CommandStatusEntity; +import io.confluent.ksql.rest.entity.ErrorMessageEntity; +import io.confluent.ksql.rest.entity.ExecutionPlan; +import io.confluent.ksql.rest.entity.KafkaTopicsList; +import io.confluent.ksql.rest.entity.KsqlEntity; +import io.confluent.ksql.rest.entity.KsqlEntityList; +import io.confluent.ksql.rest.entity.KsqlRequest; +import io.confluent.ksql.rest.entity.PropertiesList; +import io.confluent.ksql.rest.entity.Queries; +import io.confluent.ksql.rest.entity.SourceDescription; +import io.confluent.ksql.rest.entity.StreamsList; +import io.confluent.ksql.rest.entity.TablesList; +import io.confluent.ksql.rest.entity.TopicDescription; +import io.confluent.ksql.rest.entity.KsqlTopicsList; +import io.confluent.ksql.rest.server.KsqlRestApplication; +import io.confluent.ksql.rest.server.computation.CommandId; +import io.confluent.ksql.rest.server.computation.CommandStore; +import io.confluent.ksql.rest.server.computation.StatementExecutor; +import io.confluent.ksql.serde.avro.KsqlAvroTopicSerDe; +import io.confluent.ksql.util.KafkaTopicClient; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.PersistentQueryMetadata; +import io.confluent.ksql.util.QueryMetadata; + +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.misc.Interval; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import 
javax.ws.rs.core.Response; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + +@Path("/ksql") +@Consumes(MediaType.APPLICATION_JSON) +@Produces(MediaType.APPLICATION_JSON) +public class KsqlResource { + + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(KsqlResource.class); + + private final KsqlEngine ksqlEngine; + private final CommandStore commandStore; + private final StatementExecutor statementExecutor; + private final long distributedCommandResponseTimeout; + + public KsqlResource( + KsqlEngine ksqlEngine, + CommandStore commandStore, + StatementExecutor statementExecutor, + long distributedCommandResponseTimeout + ) { + this.ksqlEngine = ksqlEngine; + this.commandStore = commandStore; + this.statementExecutor = statementExecutor; + this.distributedCommandResponseTimeout = distributedCommandResponseTimeout; + } + + @POST + public Response handleKsqlStatements(KsqlRequest request) throws Exception { + KsqlEntityList result = new KsqlEntityList(); + try { + List parsedStatements = ksqlEngine.getStatements(request.getKsql()); + List statementStrings = getStatementStrings(request.getKsql()); + Map streamsProperties = request.getStreamsProperties(); + if (parsedStatements.size() != statementStrings.size()) { + throw new Exception(String.format( + "Size of parsed statements and statement strings differ; %d vs. %d, respectively", + parsedStatements.size(), + statementStrings.size() + )); + } + + for (int i = 0; i < parsedStatements.size(); i++) { + String statementText = statementStrings.get(i); + result.add(executeStatement(statementText, parsedStatements.get(i), streamsProperties)); + } + } catch (Exception exception) { + String stackTrace = ExceptionUtil.stackTraceToString(exception); + LOGGER.error(stackTrace); + result.add(new ErrorMessageEntity(request.getKsql(), exception)); + } + + + return Response.ok(result).build(); + } + + public List getStatementStrings(String ksqlString) { + List statementContexts = + new KsqlParser().getStatements(ksqlString); + List result = new ArrayList<>(statementContexts.size()); + for (SqlBaseParser.SingleStatementContext statementContext : statementContexts) { + // Taken from http://stackoverflow.com/questions/16343288/how-do-i-get-the-original-text-that-an-antlr4-rule-matched + CharStream charStream = statementContext.start.getInputStream(); + result.add( + charStream.getText( + new Interval( + statementContext.start.getStartIndex(), + statementContext.stop.getStopIndex() + ) + ) + ); + } + return result; + } + + public KsqlEngine getKsqlEngine() { + return ksqlEngine; + } + + private KsqlEntity executeStatement( + String statementText, + Statement statement, + Map streamsProperties + ) throws Exception { + if (statement instanceof ListTopics) { + return listTopics(statementText); + } else if (statement instanceof ListRegisteredTopics) { + return listRegisteredTopics(statementText); + } else if (statement instanceof ListStreams) { + return listStreams(statementText); + } else if (statement instanceof ListTables) { + return listTables(statementText); + } else if (statement instanceof ListQueries) { + return showQueries(statementText); + } else if (statement instanceof ShowColumns) { + ShowColumns showColumns = (ShowColumns) statement; + if (showColumns.isTopic()) { + return 
describeTopic(statementText, showColumns.getTable().getSuffix()); + } + return describe(statementText, showColumns.getTable().getSuffix()); + } else if (statement instanceof ListProperties) { + return listProperties(statementText); + } else if (statement instanceof Explain) { + Explain explain = (Explain) statement; + return getStatementExecutionPlan(explain, statementText); + } else if (statement instanceof RunScript) { + return distributeStatement(statementText, statement, streamsProperties); + }else if (statement instanceof RegisterTopic + || statement instanceof CreateStream + || statement instanceof CreateTable + || statement instanceof CreateStreamAsSelect + || statement instanceof CreateTableAsSelect + || statement instanceof TerminateQuery + || statement instanceof DropTopic + || statement instanceof DropStream + || statement instanceof DropTable + ) { + ExecutionPlan executionPlan = getStatementExecutionPlan(statement, statementText, + streamsProperties); + return distributeStatement(statementText, statement, streamsProperties); + } else { + if (statement != null) { + throw new Exception(String.format( + "Cannot handle statement of type '%s'", + statement.getClass().getSimpleName() + )); + } else if (statementText != null) { + throw new Exception(String.format( + "Unable to execute statement '%s'", + statementText + )); + } else { + throw new Exception("Unable to execute statement"); + } + } + } + + private CommandStatusEntity distributeStatement( + String statementText, + Statement statement, + Map streamsProperties + ) throws Exception { + CommandId commandId = + commandStore.distributeStatement(statementText, statement, streamsProperties); + CommandStatus commandStatus; + try { + commandStatus = statementExecutor.registerQueuedStatement(commandId) + .get(distributedCommandResponseTimeout, TimeUnit.MILLISECONDS); + } catch (TimeoutException exception) { + LOGGER.warn("Timeout to get commandStatus, waited {} milliseconds.", + distributedCommandResponseTimeout); + commandStatus = statementExecutor.getStatus(commandId).get(); + } + return new CommandStatusEntity(statementText, commandId, commandStatus); + } + + private KafkaTopicsList listTopics(String statementText) { + KafkaTopicClient client = ksqlEngine.getKafkaTopicClient(); + return KafkaTopicsList.build(statementText, getKsqlTopics(), + client.describeTopics(client.listTopicNames()), + ksqlEngine.getKsqlConfig()); + } + + private Collection getKsqlTopics() { + return ksqlEngine.getMetaStore().getAllKsqlTopics().values(); + } + + private KsqlTopicsList listRegisteredTopics(String statementText) { + return KsqlTopicsList.build(statementText, getKsqlTopics()); + } + + // Only shows queries running on the current machine, not across the entire cluster + private Queries showQueries(String statementText) { + List runningQueries = new ArrayList<>(); + for (PersistentQueryMetadata persistentQueryMetadata : + ksqlEngine.getPersistentQueries().values() + ) { + KsqlStructuredDataOutputNode ksqlStructuredDataOutputNode = + (KsqlStructuredDataOutputNode) persistentQueryMetadata.getOutputNode(); + + runningQueries.add(new Queries.RunningQuery( + persistentQueryMetadata.getStatementString(), + ksqlStructuredDataOutputNode.getKafkaTopicName(), + persistentQueryMetadata.getId() + )); + } + return new Queries(statementText, runningQueries); + } + + private TopicDescription describeTopic(String statementText, String name) throws + Exception { + KsqlTopic ksqlTopic = ksqlEngine.getMetaStore().getTopic(name); + if (ksqlTopic == null) { + throw 
new Exception(String.format("Could not find topic '%s' in the metastore", + name)); + } + String schemaString = null; + if (ksqlTopic.getKsqlTopicSerDe() instanceof KsqlAvroTopicSerDe) { + KsqlAvroTopicSerDe ksqlAvroTopicSerDe = (KsqlAvroTopicSerDe) ksqlTopic.getKsqlTopicSerDe(); + schemaString = ksqlAvroTopicSerDe.getSchemaString(); + } + TopicDescription topicDescription = new TopicDescription(statementText, name, ksqlTopic + .getKafkaTopicName(), + ksqlTopic.getKsqlTopicSerDe().getSerDe().toString(), + schemaString + ); + return topicDescription; + } + + private SourceDescription describe(String statementText, String name) throws Exception { + + StructuredDataSource dataSource = ksqlEngine.getMetaStore().getSource(name); + if (dataSource == null) { + throw new Exception(String.format("Could not find data stream/table '%s' in the metastore", + name)); + } + return new SourceDescription(statementText, dataSource); + } + + private PropertiesList listProperties(String statementText) { + return new PropertiesList(statementText, ksqlEngine.getKsqlConfigProperties()); + } + + private StreamsList listStreams(String statementText) { + return StreamsList.fromKsqlStreams(statementText, getSpecificSources(KsqlStream.class)); + } + + private TablesList listTables(String statementText) { + return TablesList.fromKsqlTables(statementText, getSpecificSources(KsqlTable.class)); + } + + private ExecutionPlan getStatementExecutionPlan(Explain explain, String statementText) + throws Exception { + return getStatementExecutionPlan(explain.getStatement(), statementText, Collections.emptyMap()); + } + + private ExecutionPlan getStatementExecutionPlan(Statement statement, String statementText, + Map properties) + throws Exception { + String executionPlan; + if (statement instanceof Query) { + executionPlan = ksqlEngine.getQueryExecutionPlan((Query) statement).getExecutionPlan(); + } else if (statement instanceof CreateStreamAsSelect) { + CreateStreamAsSelect createStreamAsSelect = (CreateStreamAsSelect) statement; + QueryMetadata queryMetadata = ksqlEngine.getQueryExecutionPlan(createStreamAsSelect + .getQuery()); + if (queryMetadata.getDataSourceType() == DataSource.DataSourceType.KTABLE) { + throw new KsqlException("Invalid result type. Your select query produces a TABLE. Please " + + "use CREATE TABLE AS SELECT statement instead."); + } + executionPlan = queryMetadata.getExecutionPlan(); + } else if (statement instanceof CreateTableAsSelect) { + CreateTableAsSelect createTableAsSelect = (CreateTableAsSelect) statement; + QueryMetadata queryMetadata = ksqlEngine.getQueryExecutionPlan(createTableAsSelect + .getQuery()); + if (queryMetadata.getDataSourceType() != DataSource.DataSourceType.KTABLE) { + throw new KsqlException("Invalid result type. Your select query produces a STREAM. 
Please " + + "use CREATE STREAM AS SELECT statement instead."); + } + executionPlan = queryMetadata.getExecutionPlan(); + } else if (statement instanceof RegisterTopic) { + RegisterTopic registerTopic = (RegisterTopic) statement; + RegisterTopicCommand registerTopicCommand = new RegisterTopicCommand(registerTopic, + properties); + new DDLCommandExec(ksqlEngine.getMetaStore().clone()).execute(registerTopicCommand); + executionPlan = registerTopic.toString(); + } else if (statement instanceof CreateStream) { + CreateStream createStream = (CreateStream) statement; + CreateStreamCommand createStreamCommand = + new CreateStreamCommand(createStream, properties, + ksqlEngine.getKafkaTopicClient()); + new DDLCommandExec(ksqlEngine.getMetaStore().clone()).execute(createStreamCommand); + executionPlan = createStream.toString(); + } else if (statement instanceof CreateTable) { + CreateTable createTable = (CreateTable) statement; + CreateTableCommand createTableCommand = + new CreateTableCommand(createTable, properties, ksqlEngine.getKafkaTopicClient()); + new DDLCommandExec(ksqlEngine.getMetaStore().clone()).execute(createTableCommand); + executionPlan = createTable.toString(); + } else if (statement instanceof DropTopic) { + DropTopic dropTopic = (DropTopic) statement; + DropTopicCommand dropTopicCommand = new DropTopicCommand(dropTopic); + new DDLCommandExec(ksqlEngine.getMetaStore().clone()).execute(dropTopicCommand); + executionPlan = dropTopic.toString(); + } else if (statement instanceof DropStream) { + DropStream dropStream = (DropStream) statement; + DropSourceCommand dropSourceCommand = new DropSourceCommand(dropStream); + new DDLCommandExec(ksqlEngine.getMetaStore().clone()).execute(dropSourceCommand); + executionPlan = dropStream.toString(); + } else if (statement instanceof DropTable) { + DropTable dropTable = (DropTable) statement; + DropSourceCommand dropSourceCommand = new DropSourceCommand(dropTable); + new DDLCommandExec(ksqlEngine.getMetaStore().clone()).execute(dropSourceCommand); + executionPlan = dropTable.toString(); + } else if (statement instanceof TerminateQuery) { + executionPlan = statement.toString(); + } else { + throw new KsqlException("Cannot build execution plan for this statement."); + } + return new ExecutionPlan(executionPlan); + } + + private List getSpecificSources(Class dataSourceClass) { + return ksqlEngine.getMetaStore().getAllStructuredDataSources().values().stream() + .filter(dataSourceClass::isInstance) + .filter(structuredDataSource -> !structuredDataSource.getName().equalsIgnoreCase( + KsqlRestApplication.getCommandsStreamName())) + .map(dataSourceClass::cast) + .collect(Collectors.toList()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/ServerInfoResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/ServerInfoResource.java new file mode 100644 index 000000000000..52a125906c7c --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/ServerInfoResource.java @@ -0,0 +1,29 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.server.resources; + +import io.confluent.ksql.rest.entity.ServerInfo; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +@Path("/") +@Produces(MediaType.APPLICATION_JSON) +public class ServerInfoResource { + + private final ServerInfo serverInfo; + + public ServerInfoResource(ServerInfo serverInfo) { + this.serverInfo = serverInfo; + } + + @GET + public Response getServerInfo() { + return Response.ok(serverInfo).build(); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/StatusResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/StatusResource.java new file mode 100644 index 000000000000..ac6878258c7f --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/StatusResource.java @@ -0,0 +1,49 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server.resources; + +import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.entity.CommandStatuses; +import io.confluent.ksql.rest.server.computation.CommandId; +import io.confluent.ksql.rest.server.computation.StatementExecutor; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import java.util.Optional; + +@Path("/status") +@Produces(MediaType.APPLICATION_JSON) +public class StatusResource { + + private final StatementExecutor statementExecutor; + + public StatusResource(StatementExecutor statementExecutor) { + this.statementExecutor = statementExecutor; + } + + @GET + public Response getAllStatuses() { + return Response.ok(CommandStatuses.fromFullStatuses(statementExecutor.getStatuses())).build(); + } + + @GET + @Path("/{type}/{entity}") + public Response getStatus(@PathParam("type") String type, @PathParam("entity") String entity) + throws Exception { + CommandId commandId = new CommandId(type, entity); + + Optional commandStatus = statementExecutor.getStatus(commandId); + + if (!commandStatus.isPresent()) { + throw new Exception("Command not found"); + } + + return Response.ok(commandStatus.get()).build(); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/QueryRowWriter.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/QueryRowWriter.java new file mode 100644 index 000000000000..4e075fcde635 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/QueryRowWriter.java @@ -0,0 +1,64 @@ +/** + * Copyright 2017 Confluent Inc. 
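+ * QueryRowWriter: background task that drains the row queue and writes each row to the HTTP response stream as a JSON-serialized StreamedRow, one per line, stashing any failure in the shared AtomicReference for the parent writer to report.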
+ **/ + +package io.confluent.ksql.rest.server.resources.streaming; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.confluent.ksql.physical.GenericRow; +import io.confluent.ksql.rest.entity.StreamedRow; +import org.apache.kafka.streams.KeyValue; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +class QueryRowWriter implements Runnable { + private final OutputStream output; + private final AtomicReference streamsException; + private final SynchronousQueue> rowQueue; + private final AtomicBoolean rowsWritten; + private final ObjectMapper objectMapper; + + QueryRowWriter( + OutputStream output, + AtomicReference streamsException, + SynchronousQueue> rowQueue, + AtomicBoolean rowsWritten + ) { + this.output = output; + this.streamsException = streamsException; + this.rowQueue = rowQueue; + this.rowsWritten = rowsWritten; + + this.objectMapper = new ObjectMapper().disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET); + } + + @Override + public void run() { + try { + while (true) { + write(rowQueue.take().value); + } + } catch (InterruptedException exception) { + // Interrupt is used to end the thread + } catch (Exception exception) { + // Would just throw the exception, but 1) can't throw checked exceptions from Runnable.run(), + // and 2) seems easier than converting the exception into an unchecked exception and then + // throwing it to a custom Thread.UncaughtExceptionHandler + streamsException.compareAndSet(null, exception); + } + } + + private void write(GenericRow row) throws IOException { + synchronized (output) { + objectMapper.writeValue(output, new StreamedRow(row)); + output.write("\n".getBytes()); + output.flush(); + rowsWritten.set(true); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriter.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriter.java new file mode 100644 index 000000000000..ab986a8c0780 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriter.java @@ -0,0 +1,130 @@ +/** + * Copyright 2017 Confluent Inc. 
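+ * QueryStreamWriter: StreamingOutput that starts the query's Kafka Streams job, hands queued rows to a QueryRowWriter thread, writes a periodic keep-alive newline to detect dropped connections, and closes and cleans up the streams job when the connection ends.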
+ **/ + +package io.confluent.ksql.rest.server.resources.streaming; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.rest.entity.StreamedRow; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.QueryMetadata; +import io.confluent.ksql.util.QueuedQueryMetadata; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.core.StreamingOutput; +import java.io.EOFException; +import java.io.IOException; +import java.io.OutputStream; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +class QueryStreamWriter implements StreamingOutput { + + private static final Logger log = LoggerFactory.getLogger(QueryStreamWriter.class); + + private final QueuedQueryMetadata queryMetadata; + private final long disconnectCheckInterval; + private final AtomicReference streamsException; + + QueryStreamWriter( + KsqlEngine ksqlEngine, + long disconnectCheckInterval, + String queryString, + Map overriddenProperties + ) + throws Exception { + QueryMetadata queryMetadata = + ksqlEngine.buildMultipleQueries(true, queryString, overriddenProperties).get(0); + if (!(queryMetadata instanceof QueuedQueryMetadata)) { + throw new Exception(String.format( + "Unexpected metadata type: expected QueuedQueryMetadata, found %s instead", + queryMetadata.getClass() + )); + } + + this.disconnectCheckInterval = disconnectCheckInterval; + this.queryMetadata = ((QueuedQueryMetadata) queryMetadata); + + this.streamsException = new AtomicReference<>(null); + this.queryMetadata.getKafkaStreams().setUncaughtExceptionHandler(new StreamsExceptionHandler()); + + queryMetadata.getKafkaStreams().start(); + } + + @Override + public void write(OutputStream out) throws IOException { + try { + AtomicBoolean rowsWritten = new AtomicBoolean(false); + QueryRowWriter queryRowWriter = new QueryRowWriter( + out, + streamsException, + queryMetadata.getRowQueue(), + rowsWritten + ); + Thread rowWriterThread = new Thread(queryRowWriter); + rowWriterThread.start(); + try { + while (true) { + Thread.sleep(disconnectCheckInterval); + Throwable exception = streamsException.get(); + if (exception != null) { + throw exception; + } + // If no new rows have been written, the user may have terminated the connection without + // us knowing. Check by trying to write a single newline. + if (!rowsWritten.getAndSet(false)) { + synchronized (out) { + out.write("\n".getBytes()); + out.flush(); + } + } + } + } catch (EOFException exception) { + // The user has terminated the connection; we can stop writing + } catch (InterruptedException exception) { + // The most likely cause of this is the server shutting down. Should just try to close + // gracefully, without writing any more to the connection stream. 
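+ // (The row writer thread is interrupted and joined below before the streams job is closed.)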
+ log.warn("Interrupted while writing to connection stream"); + } catch (Throwable exception) { + log.error("Exception occurred while writing to connection stream: ", exception); + synchronized (out) { + out.write("\n".getBytes()); + if (exception.getCause() instanceof KsqlException) { + new ObjectMapper().writeValue(out, new StreamedRow(exception.getCause())); + } else { + new ObjectMapper().writeValue(out, new StreamedRow(exception)); + } + out.write("\n".getBytes()); + out.flush(); + } + } + + if (rowWriterThread.isAlive()) { + try { + rowWriterThread.interrupt(); + rowWriterThread.join(); + } catch (InterruptedException exception) { + log.warn( + "Failed to join row writer thread; setting to daemon to avoid hanging on shutdown" + ); + rowWriterThread.setDaemon(true); + } + } + + } finally { + queryMetadata.getKafkaStreams().close(100L, TimeUnit.MILLISECONDS); + queryMetadata.getKafkaStreams().cleanUp(); + } + } + + private class StreamsExceptionHandler implements Thread.UncaughtExceptionHandler { + @Override + public void uncaughtException(Thread thread, Throwable exception) { + streamsException.compareAndSet(null, exception); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java new file mode 100644 index 000000000000..ae32437f3ad7 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java @@ -0,0 +1,88 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server.resources.streaming; + +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.parser.tree.LongLiteral; +import io.confluent.ksql.parser.tree.PrintTopic; +import io.confluent.ksql.parser.tree.Query; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.rest.entity.KsqlRequest; +import io.confluent.ksql.rest.server.StatementParser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +@Path("/query") +@Produces(MediaType.APPLICATION_JSON) +public class StreamedQueryResource { + private static final Logger log = LoggerFactory.getLogger(StreamedQueryResource.class); + + private final KsqlEngine ksqlEngine; + private final StatementParser statementParser; + private final long disconnectCheckInterval; + + public StreamedQueryResource( + KsqlEngine ksqlEngine, + StatementParser statementParser, + long disconnectCheckInterval + ) { + this.ksqlEngine = ksqlEngine; + this.statementParser = statementParser; + this.disconnectCheckInterval = disconnectCheckInterval; + } + + @POST + @Consumes(MediaType.APPLICATION_JSON) + public Response streamQuery(KsqlRequest request) throws Exception { + String ksql = Objects.requireNonNull(request.getKsql(), "\"ksql\" field must be given"); + Map clientLocalProperties = + Optional.ofNullable(request.getStreamsProperties()).orElse(Collections.emptyMap()); + Statement statement = statementParser.parseSingleStatement(ksql); + if (statement instanceof Query) { + QueryStreamWriter queryStreamWriter = + new QueryStreamWriter(ksqlEngine, disconnectCheckInterval, 
ksql, clientLocalProperties); + log.info("Streaming query '{}'", ksql); + return Response.ok().entity(queryStreamWriter).build(); + } else if (statement instanceof PrintTopic) { + PrintTopic printTopic = (PrintTopic) statement; + String topicName = printTopic.getTopic().toString(); + Long interval = + Optional.ofNullable(printTopic.getIntervalValue()).map(LongLiteral::getValue).orElse(1L); + KsqlTopic ksqlTopic = ksqlEngine.getMetaStore().getTopic(printTopic.getTopic().toString()); + Objects.requireNonNull( + ksqlTopic, + String.format("Could not find topic '%s' in the metastore", topicName) + ); + Map properties = ksqlEngine.getKsqlConfigProperties(); + properties.putAll(clientLocalProperties); + TopicStreamWriter topicStreamWriter = new TopicStreamWriter( + properties, + ksqlTopic, + interval, + disconnectCheckInterval, + printTopic.getFromBeginning() + ); + log.info("Printing topic '{}'", topicName); + return Response.ok().entity(topicStreamWriter).build(); + } else { + throw new Exception(String.format( + "Statement type `%s' not supported for this resource", + statement.getClass().getName() + )); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamWriter.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamWriter.java new file mode 100644 index 000000000000..3f45e245658d --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamWriter.java @@ -0,0 +1,162 @@ +/** + * Copyright 2017 Confluent Inc. + **/ + +package io.confluent.ksql.rest.server.resources.streaming; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; + +import io.confluent.ksql.metastore.DataSource; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.serde.avro.KsqlAvroTopicSerDe; +import io.confluent.ksql.serde.avro.KsqlGenericRowAvroDeserializer; +import io.confluent.ksql.serde.avro.KsqlGenericRowAvroSerializer; +import io.confluent.ksql.util.SchemaUtil; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.StreamingOutput; +import java.io.EOFException; +import java.io.IOException; +import java.io.OutputStream; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + + +public class TopicStreamWriter implements StreamingOutput { + + private static final Logger log = LoggerFactory.getLogger(TopicStreamWriter.class); + private final Long interval; + private final long disconnectCheckInterval; + private final KafkaConsumer topicConsumer; + private final String kafkaTopic; + KsqlTopic ksqlTopic; + private final ObjectMapper objectMapper; + + private long messagesWritten; + + public TopicStreamWriter( + Map consumerProperties, + KsqlTopic ksqlTopic, + long interval, + long disconnectCheckInterval, + boolean fromBeginning + ) { + this.ksqlTopic = ksqlTopic; + this.kafkaTopic = ksqlTopic.getKafkaTopicName(); + this.messagesWritten = 0; + this.objectMapper = new 
ObjectMapper(); + + Deserializer valueDeserializer; + switch (ksqlTopic.getKsqlTopicSerDe().getSerDe()) { + case JSON: + case DELIMITED: + valueDeserializer = new StringDeserializer(); + break; + case AVRO: + KsqlAvroTopicSerDe avroTopicSerDe = (KsqlAvroTopicSerDe) ksqlTopic.getKsqlTopicSerDe(); + Map avroSerdeProps = new HashMap<>(); + avroSerdeProps.put( + KsqlGenericRowAvroSerializer.AVRO_SERDE_SCHEMA_CONFIG, + avroTopicSerDe.getSchemaString() + ); + valueDeserializer = new KsqlGenericRowAvroDeserializer(null); + valueDeserializer.configure(avroSerdeProps, false); + break; + default: + throw new RuntimeException(String.format( + "Unexpected SerDe type: %s", + ksqlTopic.getDataSourceType().name() + )); + } + + this.disconnectCheckInterval = disconnectCheckInterval; + + this.topicConsumer = + new KafkaConsumer<>(consumerProperties, new StringDeserializer(), valueDeserializer); + List topicPartitions = topicConsumer.partitionsFor(kafkaTopic) + .stream() + .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition())) + .collect(Collectors.toList()); + topicConsumer.assign(topicPartitions); + + if (fromBeginning) { + topicConsumer.seekToBeginning(topicPartitions); + } + + this.interval = interval; + } + + @Override + public void write(OutputStream out) throws IOException, WebApplicationException { + try { + while (true) { + ConsumerRecords records = topicConsumer.poll(disconnectCheckInterval); + if (records.isEmpty()) { + synchronized (out) { + out.write("\n".getBytes()); + out.flush(); + } + } else { + synchronized (out) { + for (ConsumerRecord record : records.records(kafkaTopic)) { + if (record.value() != null) { + if (messagesWritten++ % interval == 0) { + if (ksqlTopic.getKsqlTopicSerDe().getSerDe() == DataSource.DataSourceSerDe.JSON) { + printJsonValue(out, record); + } else { + printAvroOrDelimitedValue(out, record); + } + } + } + } + } + } + } + } catch (EOFException exception) { + // Connection terminated, we can stop writing + } catch (Exception exception) { + log.error("Exception encountered while writing to output stream", exception); + synchronized (out) { + out.write(exception.getMessage().getBytes()); + out.write("\n".getBytes()); + out.flush(); + } + } finally { + topicConsumer.close(); + } + } + + private void printJsonValue(OutputStream out, ConsumerRecord record) throws IOException { + JsonNode jsonNode = objectMapper.readTree(record.value().toString()); + ObjectNode objectNode = objectMapper.createObjectNode(); + objectNode.put(SchemaUtil.ROWTIME_NAME, record.timestamp()); + objectNode.put(SchemaUtil.ROWKEY_NAME, (record.key() != null)? record.key() + .toString(): "null"); + objectNode.setAll((ObjectNode) jsonNode); + objectMapper.writeValue(out, objectNode); + out.write("\n".getBytes()); + out.flush(); + } + + private void printAvroOrDelimitedValue(OutputStream out, ConsumerRecord record) throws + IOException { + out.write((record.timestamp() + " , " +record.key().toString() + " , " + record.value() + .toString()).getBytes()); + out.write("\n".getBytes()); + out.flush(); + } + +} diff --git a/ksql-rest-app/src/main/resources/io/confluent/ksql/rest/quickstart.html b/ksql-rest-app/src/main/resources/io/confluent/ksql/rest/quickstart.html new file mode 100644 index 000000000000..f791f3197599 --- /dev/null +++ b/ksql-rest-app/src/main/resources/io/confluent/ksql/rest/quickstart.html @@ -0,0 +1,438 @@ + + + + + KSQL Quickstart + + + + +
+ [quickstart.html — markup stripped in extraction; recoverable structure only:
+  a "KSQL:" statement box with "Streams Properties:" fields;
+  a submit control for KSQL requests (examples: "LIST STREAMS;", "SHOW QUERIES;",
+  "CREATE STREAM foo AS SELECT baz FROM bar;");
+  a submit control for streamed queries (examples: "SELECT * FROM foo;", "PRINT topic_foo;");
+  a "Server response:" area with an output format selector (not supported with topic printing).]
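The quickstart page above drives the same two endpoints that the Java client wraps: POST /ksql for statements and POST /query for streamed results. A minimal usage sketch, assuming a KSQL server at http://localhost:8080 (the URL is illustrative) and the generic RestResponse<KsqlEntityList> signature suggested by the client tests below:

import io.confluent.ksql.rest.client.KsqlRestClient;
import io.confluent.ksql.rest.client.RestResponse;
import io.confluent.ksql.rest.entity.KsqlEntityList;

public class QuickstartClientSketch {
  public static void main(String[] args) {
    // Assumption: a KSQL REST server is listening at this address.
    KsqlRestClient client = new KsqlRestClient("http://localhost:8080");
    // Same request the quickstart form sends for "LIST STREAMS;" (POST /ksql).
    RestResponse<KsqlEntityList> response = client.makeKsqlRequest("LIST STREAMS;");
    if (response.isSuccessful()) {
      // One KsqlEntity per executed statement.
      response.getResponse().forEach(System.out::println);
    }
  }
}

The streamed form (makeQueryRequest) instead holds the connection open and emits one JSON-serialized row per line, as implemented by QueryStreamWriter above.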
+ + diff --git a/ksql-rest-app/src/main/resources/log4j.properties b/ksql-rest-app/src/main/resources/log4j.properties new file mode 100644 index 000000000000..3c7fa1fdaa4d --- /dev/null +++ b/ksql-rest-app/src/main/resources/log4j.properties @@ -0,0 +1,5 @@ +log4j.rootLogger=INFO,stdout + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/client/KsqlRestClientTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/client/KsqlRestClientTest.java new file mode 100644 index 000000000000..a2a1fba2abd4 --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/client/KsqlRestClientTest.java @@ -0,0 +1,80 @@ +package io.confluent.ksql.rest.client; + +import org.apache.kafka.streams.StreamsConfig; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.entity.CommandStatuses; +import io.confluent.ksql.rest.entity.ExecutionPlan; +import io.confluent.ksql.rest.entity.KsqlEntityList; +import io.confluent.ksql.rest.server.KsqlRestConfig; +import io.confluent.ksql.rest.server.computation.CommandId; +import io.confluent.ksql.rest.server.mock.MockApplication; + +public class KsqlRestClientTest { + + MockApplication mockApplication; + int portNumber = 59098; + KsqlRestConfig ksqlRestConfig; + KsqlRestClient ksqlRestClient; + + @Before + public void init() throws Exception { + Map props = new HashMap<>(); + props.put(KsqlRestConfig.LISTENERS_CONFIG, "http://localhost:59098"); +// props.put(KsqlRestConfig.PORT_CONFIG, String.valueOf(portNumber)); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "ksql_config_test"); + props.put(KsqlRestConfig.COMMAND_TOPIC_SUFFIX_CONFIG, "commands"); + ksqlRestConfig = new KsqlRestConfig(props); + mockApplication = new MockApplication(ksqlRestConfig); + mockApplication.start(); + + ksqlRestClient = new KsqlRestClient("http://localhost:" + portNumber); + } + + @After + public void cleanUp() throws Exception { + mockApplication.stop(); + } + + @Test + public void testKsqlResource() { + + RestResponse results = ksqlRestClient.makeKsqlRequest("Test request"); + Assert.assertNotNull(results); + Assert.assertTrue(results.isSuccessful()); + KsqlEntityList ksqlEntityList = results.getResponse(); + Assert.assertTrue(ksqlEntityList.size() == 1); + Assert.assertTrue(ksqlEntityList.get(0) instanceof ExecutionPlan); + } + + + @Test + public void testStreamQuery() { + RestResponse queryResponse = ksqlRestClient.makeQueryRequest + ("Select *"); + Assert.assertNotNull(queryResponse); + Assert.assertTrue(queryResponse.isSuccessful()); + } + + @Test + public void testStatus() { + RestResponse commandStatusesRestResponse = ksqlRestClient.makeStatusRequest(); + Assert.assertNotNull(commandStatusesRestResponse); + Assert.assertTrue(commandStatusesRestResponse.isSuccessful()); + CommandStatuses commandStatuses = commandStatusesRestResponse.getResponse(); + Assert.assertTrue(commandStatuses.size() == 2); + Assert.assertTrue(commandStatuses.get(new CommandId(CommandId.Type.TOPIC, "c1")) == CommandStatus.Status.SUCCESS); + Assert.assertTrue(commandStatuses.get(new CommandId(CommandId.Type.TOPIC, "c2")) == + 
CommandStatus.Status.ERROR); + + } + +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java new file mode 100644 index 000000000000..c594b1b645da --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java @@ -0,0 +1,80 @@ +package io.confluent.ksql.rest.server; + +import org.apache.kafka.streams.StreamsConfig; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotSame; + +public class KsqlRestConfigTest { + + private Map getBaseProperties() { + Map result = new HashMap<>(); + result.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + result.put(StreamsConfig.APPLICATION_ID_CONFIG, "ksql_config_test"); + result.put(KsqlRestConfig.COMMAND_TOPIC_SUFFIX_CONFIG, "commands"); + return result; + } + + private void assertKeyEquals(String key, Map expected, Map test) { + assertEquals(expected.get(key), test.get(key)); + } + + @Test + public void testGetKsqlStreamsProperties() { + final long BASE_COMMIT_INTERVAL_MS = 1000; + final long OVERRIDE_COMMIT_INTERVAL_MS = 100; + + final String OVERRIDE_BOOTSTRAP_SERVERS = "ksql.io.confluent:9098"; + + assertNotEquals(BASE_COMMIT_INTERVAL_MS, OVERRIDE_COMMIT_INTERVAL_MS); + + Map inputProperties = getBaseProperties(); + inputProperties.put( + StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, + BASE_COMMIT_INTERVAL_MS + ); + inputProperties.put( + KsqlRestConfig.KSQL_STREAMS_PREFIX + StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, + OVERRIDE_COMMIT_INTERVAL_MS + ); + inputProperties.put( + KsqlRestConfig.KSQL_STREAMS_PREFIX + StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, + OVERRIDE_BOOTSTRAP_SERVERS + ); + + Map testProperties = new KsqlRestConfig(inputProperties).getKsqlStreamsProperties(); + + assertEquals( + OVERRIDE_COMMIT_INTERVAL_MS, + testProperties.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + ); + assertEquals( + OVERRIDE_BOOTSTRAP_SERVERS, + testProperties.get(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG) + ); + } + + // Just a sanity check to make sure that, although they contain identical mappings, successive maps returned by calls + // to KsqlRestConfig.getOriginals() do not actually return the same object (mutability would then be an issue) + @Test + public void testOriginalsReplicability() { + final String COMMIT_INTERVAL_MS = "10"; + + Map inputProperties = getBaseProperties(); + inputProperties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, COMMIT_INTERVAL_MS); + KsqlRestConfig config = new KsqlRestConfig(inputProperties); + + final Map originals1 = config.getOriginals(); + final Map originals2 = config.getOriginals(); + + assertEquals(originals1, originals2); + assertNotSame(originals1, originals2); + assertEquals(COMMIT_INTERVAL_MS, originals1.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)); + assertEquals(COMMIT_INTERVAL_MS, originals2.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)); + } +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandRunnerTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandRunnerTest.java new file mode 100644 index 000000000000..2b405631698a --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandRunnerTest.java @@ -0,0 +1,93 @@ +package io.confluent.ksql.rest.server.computation; + + +import 
org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serializer; +import org.junit.Assert; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +import io.confluent.kafka.serializers.KafkaJsonDeserializer; +import io.confluent.kafka.serializers.KafkaJsonSerializer; +import io.confluent.ksql.metastore.MetaStoreImpl; +import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.server.StatementParser; +import io.confluent.ksql.rest.server.mock.MockCommandStore; +import io.confluent.ksql.rest.server.mock.MockKafkaTopicClient; +import io.confluent.ksql.rest.server.mock.MockKsqkEngine; +import io.confluent.ksql.rest.server.utils.TestUtils; + +public class CommandRunnerTest { + + private MockKsqkEngine mockKsqkEngine = new MockKsqkEngine( + TestUtils.getMockKsqlConfig(), new MockKafkaTopicClient()); + private StatementParser statementParser = new StatementParser(mockKsqkEngine); + StatementExecutor statementExecutor = new StatementExecutor(mockKsqkEngine, statementParser); + CommandRunner commandRunner = null; + + private CommandRunner getCommanRunner() { + if (commandRunner != null) { + return commandRunner; + } + Map commandConsumerProperties = new HashMap<>(); + commandConsumerProperties.put("bootstrap.servers", "localhost:9092"); + Serializer commandSerializer = new KafkaJsonSerializer<>(); + Deserializer commandDeserializer = new KafkaJsonDeserializer<>(); + Serializer commandIdSerializer = new KafkaJsonSerializer<>(); + Deserializer commandIdDeserializer = new KafkaJsonDeserializer<>(); + + KafkaConsumer commandConsumer = new KafkaConsumer<>( + commandConsumerProperties, + commandIdDeserializer, + commandDeserializer + ); + + CommandRunner commandRunner = new CommandRunner(statementExecutor, new MockCommandStore + ("CT", commandConsumer, null, + new CommandIdAssigner(new MetaStoreImpl()))); + return commandRunner; + } + + + @Test + public void testThread() throws InterruptedException { + CommandRunner commandRunner = getCommanRunner(); + new Thread(commandRunner).start(); + Thread.sleep(5000); + commandRunner.close(); + CommandId topicCommandId = new CommandId(CommandId.Type.TOPIC, "_CSASTopicGen"); + CommandId csCommandId = new CommandId(CommandId.Type.STREAM, "_CSASStreamGen"); + CommandId csasCommandId = new CommandId(CommandId.Type.STREAM, "_CSASGen"); + CommandId ctasCommandId = new CommandId(CommandId.Type.TABLE, "_CTASGen"); + + Map statusStore = statementExecutor.getStatuses(); + Assert.assertNotNull(statusStore); + Assert.assertEquals(statusStore.size(), 4); + Assert.assertEquals(statusStore.get(topicCommandId).getStatus(), CommandStatus.Status.SUCCESS); + Assert.assertEquals(statusStore.get(csCommandId).getStatus(), CommandStatus.Status.SUCCESS); + Assert.assertEquals(statusStore.get(csasCommandId).getStatus(), CommandStatus.Status.ERROR); + Assert.assertEquals(statusStore.get(ctasCommandId).getStatus(), CommandStatus.Status.ERROR); + } + + @Test + public void testPriorCommandsRun() throws Exception { + CommandRunner commandRunner = getCommanRunner(); + commandRunner.processPriorCommands(); + CommandId topicCommandId = new CommandId(CommandId.Type.TOPIC, "_CSASTopicGen"); + CommandId csCommandId = new CommandId(CommandId.Type.STREAM, "_CSASStreamGen"); + CommandId csasCommandId = new CommandId(CommandId.Type.STREAM, "_CSASGen"); + CommandId ctasCommandId = new 
CommandId(CommandId.Type.TABLE, "_CTASGen"); + Map statusStore = statementExecutor.getStatuses(); + Assert.assertNotNull(statusStore); + Assert.assertEquals(statusStore.size(), 4); + Assert.assertEquals(statusStore.get(topicCommandId).getStatus(), CommandStatus.Status.SUCCESS); + Assert.assertEquals(statusStore.get(csCommandId).getStatus(), CommandStatus.Status.SUCCESS); + Assert.assertEquals(statusStore.get(csasCommandId).getStatus(), CommandStatus.Status.ERROR); + Assert.assertEquals(statusStore.get(ctasCommandId).getStatus(), CommandStatus.Status.ERROR); + } + +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/StatementExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/StatementExecutorTest.java new file mode 100644 index 000000000000..7ac798bfa43e --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/StatementExecutorTest.java @@ -0,0 +1,126 @@ +package io.confluent.ksql.rest.server.computation; + + +import org.easymock.EasyMockSupport; +import org.junit.Assert; +import org.junit.Test; + +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.server.StatementParser; +import io.confluent.ksql.rest.server.mock.MockKafkaTopicClient; +import io.confluent.ksql.rest.server.mock.MockKsqkEngine; +import io.confluent.ksql.rest.server.utils.TestUtils; +import io.confluent.ksql.util.Pair; + +public class StatementExecutorTest extends EasyMockSupport { + + private MockKsqkEngine mockKsqkEngine = new MockKsqkEngine( + TestUtils.getMockKsqlConfig(), new MockKafkaTopicClient()); + + private StatementParser statementParser = new StatementParser(mockKsqkEngine); + + StatementExecutor statementExecutor = new StatementExecutor(mockKsqkEngine, statementParser); + + + + @Test + public void handleCorrectDDLStatement() throws Exception { + Command command = new Command("REGISTER TOPIC users_topic WITH (value_format = 'json', " + + "kafka_topic='user_topic_json');", new HashMap<>()); + CommandId commandId = new CommandId(CommandId.Type.TOPIC, "_CorrectTopicGen"); + statementExecutor.handleStatement(command, commandId); + Map statusStore = statementExecutor.getStatuses(); + Assert.assertNotNull(statusStore); + Assert.assertEquals(statusStore.size(), 1); + Assert.assertEquals(statusStore.get(commandId).getStatus(), CommandStatus.Status.SUCCESS); + + } + + @Test + public void handleIncorrectDDLStatement() throws Exception { + Command command = new Command("REGIST ER TOPIC users_topic WITH (value_format = 'json', " + + "kafka_topic='user_topic_json');", new HashMap<>()); + CommandId commandId = new CommandId(CommandId.Type.TOPIC, "_IncorrectTopicGen"); + statementExecutor.handleStatement(command, commandId); + Map statusStore = statementExecutor.getStatuses(); + Assert.assertNotNull(statusStore); + Assert.assertEquals(statusStore.size(), 1); + Assert.assertEquals(statusStore.get(commandId).getStatus(), CommandStatus.Status.ERROR); + + } + + @Test + public void handleCSAS_CTASStatement() throws Exception { + + Command topicCommand = new Command("REGISTER TOPIC pageview_topic WITH " + + "(value_format = 'json', " + + "kafka_topic='pageview_topic_json');", new HashMap<>()); + CommandId topicCommandId = new CommandId(CommandId.Type.TOPIC, "_CSASTopicGen"); + statementExecutor.handleStatement(topicCommand, topicCommandId); + + Command csCommand = new Command("CREATE STREAM 
pageview " + + "(viewtime bigint, pageid varchar, userid varchar) " + + "WITH (registered_topic = 'pageview_topic');", + new HashMap<>()); + CommandId csCommandId = new CommandId(CommandId.Type.STREAM, "_CSASStreamGen"); + statementExecutor.handleStatement(csCommand, csCommandId); + + Command csasCommand = new Command("CREATE STREAM user1pv " + + " AS select * from pageview WHERE userid = 'user1';", + new HashMap<>()); + + CommandId csasCommandId = new CommandId(CommandId.Type.STREAM, "_CSASGen"); + statementExecutor.handleStatement(csasCommand, csasCommandId); + + Command ctasCommand = new Command("CREATE TABLE user1pvtb " + + " AS select * from pageview window tumbling(size 5 " + + "second) WHERE userid = " + + "'user1' group by pageid;", + new HashMap<>()); + + CommandId ctasCommandId = new CommandId(CommandId.Type.TABLE, "_CTASGen"); + statementExecutor.handleStatement(ctasCommand, ctasCommandId); + + Command terminateCommand = new Command("TERMINATE 1;", + new HashMap<>()); + + CommandId terminateCommandId = new CommandId(CommandId.Type.TABLE, "_TerminateGen"); + statementExecutor.handleStatement(terminateCommand, terminateCommandId); + + Map statusStore = statementExecutor.getStatuses(); + Assert.assertNotNull(statusStore); + Assert.assertEquals(statusStore.size(), 5); + Assert.assertEquals(statusStore.get(topicCommandId).getStatus(), CommandStatus.Status.SUCCESS); + Assert.assertEquals(statusStore.get(csCommandId).getStatus(), CommandStatus.Status.SUCCESS); + Assert.assertEquals(statusStore.get(csasCommandId).getStatus(), CommandStatus.Status.ERROR); + Assert.assertEquals(statusStore.get(ctasCommandId).getStatus(), CommandStatus.Status.ERROR); + Assert.assertEquals(statusStore.get(terminateCommandId).getStatus(), CommandStatus.Status.ERROR); + } + + @Test + public void handlePriorStatement() throws Exception { + TestUtils testUtils = new TestUtils(); + List> priorCommands = testUtils.getAllPriorCommandRecords(); + + CommandId topicCommandId = new CommandId(CommandId.Type.TOPIC, "_CSASTopicGen"); + CommandId csCommandId = new CommandId(CommandId.Type.STREAM, "_CSASStreamGen"); + CommandId csasCommandId = new CommandId(CommandId.Type.STREAM, "_CSASGen"); + CommandId ctasCommandId = new CommandId(CommandId.Type.TABLE, "_CTASGen"); + + statementExecutor.handleStatements(priorCommands); + + Map statusStore = statementExecutor.getStatuses(); + Assert.assertNotNull(statusStore); + Assert.assertEquals(statusStore.size(), 4); + Assert.assertEquals(statusStore.get(topicCommandId).getStatus(), CommandStatus.Status.SUCCESS); + Assert.assertEquals(statusStore.get(csCommandId).getStatus(), CommandStatus.Status.SUCCESS); + Assert.assertEquals(statusStore.get(csasCommandId).getStatus(), CommandStatus.Status.ERROR); + Assert.assertEquals(statusStore.get(ctasCommandId).getStatus(), CommandStatus.Status.ERROR); + } + +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockApplication.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockApplication.java new file mode 100644 index 000000000000..0c8670baed7b --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockApplication.java @@ -0,0 +1,21 @@ +package io.confluent.ksql.rest.server.mock; + +import javax.ws.rs.core.Configurable; + +import io.confluent.ksql.rest.server.KsqlRestConfig; +import io.confluent.rest.Application; + + +public class MockApplication extends Application { + + public MockApplication(KsqlRestConfig config) { + super(config); + } + + @Override + public 
void setupResources(Configurable configurable, KsqlRestConfig ksqlRestConfig) { + configurable.register(new MockKsqlResources()); + configurable.register(new MockStreamedQueryResource()); + configurable.register(new MockStatusResource()); + } +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockCommandStore.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockCommandStore.java new file mode 100644 index 000000000000..a6f06b02d7dd --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockCommandStore.java @@ -0,0 +1,83 @@ +package io.confluent.ksql.rest.server.mock; + +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.common.TopicPartition; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.confluent.ksql.metastore.MetaStoreImpl; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.rest.server.computation.Command; +import io.confluent.ksql.rest.server.computation.CommandId; +import io.confluent.ksql.rest.server.computation.CommandIdAssigner; +import io.confluent.ksql.rest.server.computation.CommandStore; +import io.confluent.ksql.rest.server.utils.TestUtils; +import io.confluent.ksql.util.Pair; + +public class MockCommandStore extends CommandStore { + + CommandIdAssigner commandIdAssigner; + + private final AtomicBoolean closed; + private boolean isFirstCall = true; + + public MockCommandStore(String commandTopic, + Consumer commandConsumer, + Producer commandProducer, + CommandIdAssigner commandIdAssigner) { + super(commandTopic, commandConsumer, commandProducer, + new CommandIdAssigner(new MetaStoreImpl())); + + commandIdAssigner = new CommandIdAssigner(new MetaStoreImpl()); + closed = new AtomicBoolean(false); + } + + @Override + public void close() { + closed.set(true); + } + + @Override + public ConsumerRecords getNewCommands() { + List> records = new ArrayList<>(); + Map>> recordsMap = new HashMap<>(); + if (isFirstCall) { + List> commands = new TestUtils().getAllPriorCommandRecords(); + for (Pair commandIdCommandPair: commands) { + records.add(new ConsumerRecord( + "T1",10, 100, + commandIdCommandPair.getLeft(), commandIdCommandPair.getRight())); + } + + recordsMap.put(new TopicPartition("T1", 1), records); + isFirstCall = false; + } else { + close(); + } + return new ConsumerRecords<>(recordsMap); + } + + @Override + public CommandId distributeStatement( + String statementString, + Statement statement, + Map streamsProperties + ) throws Exception { + CommandId commandId = commandIdAssigner.getCommandId(statement); + return commandId; + } + + @Override + public List> getPriorCommands() { + return new TestUtils().getAllPriorCommandRecords(); + } + +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKafkaTopicClient.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKafkaTopicClient.java new file mode 100644 index 000000000000..b467707fef96 --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKafkaTopicClient.java @@ -0,0 +1,44 @@ +/** + * Copyright 2017 Confluent Inc. 
+ **/ + +package io.confluent.ksql.rest.server.mock; + +import io.confluent.ksql.util.KafkaTopicClient; +import org.apache.kafka.clients.admin.TopicDescription; + +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +/** + * Fake Kafka Client is for test only, none of its methods should be called. + */ +public class MockKafkaTopicClient implements KafkaTopicClient { + + @Override + public void createTopic(String topic, int numPartitions, short replicatonFactor) { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + + @Override + public boolean isTopicExists(String topic) { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + + @Override + public Set listTopicNames() { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + + @Override + public Map describeTopics(Collection topicNames) { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + + @Override + public void close() { + throw new UnsupportedOperationException("Calling method on FakeObject"); + } + +} \ No newline at end of file diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKsqkEngine.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKsqkEngine.java new file mode 100644 index 000000000000..31e390833469 --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKsqkEngine.java @@ -0,0 +1,13 @@ +package io.confluent.ksql.rest.server.mock; + +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.util.KafkaTopicClient; +import io.confluent.ksql.util.KsqlConfig; + +public class MockKsqkEngine extends KsqlEngine { + + public MockKsqkEngine(KsqlConfig ksqlConfig, + KafkaTopicClient kafkaTopicClient) { + super(ksqlConfig, kafkaTopicClient); + } +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKsqlResources.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKsqlResources.java new file mode 100644 index 000000000000..6941ef8e71bb --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockKsqlResources.java @@ -0,0 +1,29 @@ +package io.confluent.ksql.rest.server.mock; + + +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import io.confluent.ksql.rest.entity.ExecutionPlan; +import io.confluent.ksql.rest.entity.KsqlEntityList; +import io.confluent.ksql.rest.entity.KsqlRequest; + +@Path("/ksql") +@Consumes(MediaType.APPLICATION_JSON) +@Produces(MediaType.APPLICATION_JSON) +public class MockKsqlResources { + + + @POST + public Response handleKsqlStatements(KsqlRequest request) throws Exception { + + KsqlEntityList result = new KsqlEntityList(); + result.add(new ExecutionPlan("TestExecution plan")); + return Response.ok(result).build(); + } + +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockStatusResource.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockStatusResource.java new file mode 100644 index 000000000000..5eb596fac3b0 --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockStatusResource.java @@ -0,0 +1,38 @@ +package io.confluent.ksql.rest.server.mock; + + +import java.util.HashMap; +import java.util.Map; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import 
javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.entity.CommandStatuses; +import io.confluent.ksql.rest.server.computation.CommandId; + +@Path("/status") +@Produces(MediaType.APPLICATION_JSON) +public class MockStatusResource { + + @GET + public Response getAllStatuses() { + Map statuses = new + HashMap<>(); + statuses.put(new CommandId(CommandId.Type.TOPIC, "c1"), CommandStatus.Status.SUCCESS); + statuses.put(new CommandId(CommandId.Type.TOPIC, "c2"), CommandStatus.Status.ERROR); + CommandStatuses commandStatuses = new CommandStatuses(statuses); + return Response.ok(commandStatuses).build(); + } + + @GET + @Path("/{type}/{entity}") + public Response getStatus(@PathParam("type") String type, @PathParam("entity") String entity) + throws Exception { + return Response.ok("status").build(); + } +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockStreamedQueryResource.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockStreamedQueryResource.java new file mode 100644 index 000000000000..d93125c51d91 --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockStreamedQueryResource.java @@ -0,0 +1,39 @@ +package io.confluent.ksql.rest.server.mock; + + +import java.io.IOException; +import java.io.OutputStream; + +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.StreamingOutput; + +import io.confluent.ksql.rest.entity.KsqlRequest; + +@Path("/query") +@Produces(MediaType.APPLICATION_JSON) +public class MockStreamedQueryResource { + + @POST + @Consumes(MediaType.APPLICATION_JSON) + public Response streamQuery(KsqlRequest request) throws Exception { + TestStreamWriter testStreamWriter = new TestStreamWriter(); + return Response.ok().entity(testStreamWriter).build(); + } + + private class TestStreamWriter implements StreamingOutput { + + @Override + public void write(OutputStream out) throws IOException, WebApplicationException { + synchronized (out) { + out.write("Hello".getBytes()); + out.flush(); + } + } + } +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java new file mode 100644 index 000000000000..938a8813b418 --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -0,0 +1,350 @@ +package io.confluent.ksql.rest.server.resources; + +import io.confluent.kafka.serializers.KafkaJsonDeserializer; +import io.confluent.kafka.serializers.KafkaJsonDeserializerConfig; +import io.confluent.kafka.serializers.KafkaJsonSerializer; +import io.confluent.ksql.KsqlEngine; +import io.confluent.ksql.ddl.DdlConfig; +import io.confluent.ksql.metastore.KsqlStream; +import io.confluent.ksql.metastore.KsqlTable; +import io.confluent.ksql.metastore.KsqlTopic; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.tree.*; +import io.confluent.ksql.rest.entity.*; +import io.confluent.ksql.rest.server.mock.MockKafkaTopicClient; +import io.confluent.ksql.rest.server.KsqlRestConfig; +import io.confluent.ksql.rest.server.StatementParser; +import io.confluent.ksql.rest.server.computation.*; +import 
io.confluent.ksql.serde.json.KsqlJsonTopicSerDe; +import io.confluent.ksql.util.KsqlConfig; +import org.apache.commons.lang3.concurrent.ConcurrentUtils; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serializer; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.junit.Test; + +import java.util.*; +import java.util.concurrent.Future; +import java.util.stream.Collectors; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + +public class KsqlResourceTest { + + private static class TestCommandProducer extends KafkaProducer { + public TestCommandProducer(Map configs, Serializer keySerializer, Serializer valueSerializer) { + super(configs, keySerializer, valueSerializer); + } + + @Override + public Future send(ProducerRecord record) { + // Fake result: only for testing purpose + return ConcurrentUtils.constantFuture(new RecordMetadata(null, 0, 0, 0, 0, 0, 0)); + } + } + + private static class TestCommandConsumer extends KafkaConsumer { + public TestCommandConsumer(Map configs, Deserializer keyDeserializer, Deserializer valueDeserializer) { + super(configs, keyDeserializer, valueDeserializer); + } + } + + private static class TestKsqlResourceUtil { + + public static final long DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT = 1000; + + public static KsqlResource get() throws Exception { + + Properties defaultKsqlConfig = getDefaultKsqlConfig(); + + // Map commandConsumerProperties = config.getCommandConsumerProperties(); + KafkaConsumer commandConsumer = new TestCommandConsumer<>( + defaultKsqlConfig, + getJsonDeserializer(CommandId.class, true), + getJsonDeserializer(Command.class, false) + ); + + KafkaProducer commandProducer = new TestCommandProducer<>( + defaultKsqlConfig, + getJsonSerializer(true), + getJsonSerializer(false) + ); + + KsqlRestConfig restConfig = new KsqlRestConfig(defaultKsqlConfig); + KsqlConfig ksqlConfig = new KsqlConfig(restConfig.getKsqlStreamsProperties()); + + KsqlEngine ksqlEngine = new KsqlEngine(ksqlConfig, new MockKafkaTopicClient()); + CommandStore commandStore = new CommandStore("__COMMANDS_TOPIC", + commandConsumer, commandProducer, new CommandIdAssigner(ksqlEngine.getMetaStore())); + StatementExecutor statementExecutor = new StatementExecutor(ksqlEngine, new StatementParser(ksqlEngine)); + + addTestTopicAndSources(ksqlEngine.getMetaStore()); + return new KsqlResource(ksqlEngine, commandStore, statementExecutor, DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT); + } + + private static Properties getDefaultKsqlConfig() { + Map configMap = new HashMap<>(); + configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + configMap.put("application.id", "KsqlResourceTest"); + configMap.put("commit.interval.ms", 0); + configMap.put("cache.max.bytes.buffering", 0); + configMap.put("auto.offset.reset", "earliest"); + configMap.put("ksql.command.topic.suffix", "commands"); + + Properties properties = new Properties(); + properties.putAll(configMap); + + return properties; + } + + private static void addTestTopicAndSources(MetaStore metaStore) { 
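+ // Seed the metastore with a JSON-backed table (TEST_TABLE on KAFKA_TOPIC_1) and stream (TEST_STREAM on KAFKA_TOPIC_2) for the DESCRIBE/LIST tests to find.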
+ Schema schema1 = SchemaBuilder.struct().field("S1_F1", Schema.BOOLEAN_SCHEMA); + KsqlTopic ksqlTopic1 = new KsqlTopic("KSQL_TOPIC_1", "KAFKA_TOPIC_1", new KsqlJsonTopicSerDe(null)); + metaStore.putTopic(ksqlTopic1); + metaStore.putSource(new KsqlTable("TEST_TABLE", schema1, schema1.field("S1_F1"), null, + ksqlTopic1, "statestore", false)); + + Schema schema2 = SchemaBuilder.struct().field("S2_F1", Schema.STRING_SCHEMA).field("S2_F2", Schema.INT32_SCHEMA); + KsqlTopic ksqlTopic2 = new KsqlTopic("KSQL_TOPIC_2", "KAFKA_TOPIC_2", new KsqlJsonTopicSerDe(null)); + metaStore.putTopic(ksqlTopic2); + metaStore.putSource(new KsqlStream("TEST_STREAM", schema2, schema2.field("S2_F2"), null, + ksqlTopic2)); + } + + private static Deserializer getJsonDeserializer(Class classs, boolean isKey) { + Deserializer result = new KafkaJsonDeserializer<>(); + String typeConfigProperty = isKey + ? KafkaJsonDeserializerConfig.JSON_KEY_TYPE + : KafkaJsonDeserializerConfig.JSON_VALUE_TYPE; + + Map props = Collections.singletonMap( + typeConfigProperty, + classs + ); + result.configure(props, isKey); + return result; + } + + private static Serializer getJsonSerializer(boolean isKey) { + Serializer result = new KafkaJsonSerializer<>(); + result.configure(Collections.emptyMap(), isKey); + return result; + } + + } + + private R makeSingleRequest( + KsqlResource testResource, + String ksqlString, + Statement ksqlStatement, + Map streamsProperties, + Class responseClass + ) throws Exception{ + + Object responseEntity = testResource.handleKsqlStatements( + new KsqlRequest(ksqlString, streamsProperties) + ).getEntity(); + assertThat(responseEntity, instanceOf(List.class)); + + List responseList = (List) responseEntity; + assertEquals(1, responseList.size()); + + Object responseElement = responseList.get(0); + assertThat(responseElement, instanceOf(responseClass)); + + return responseClass.cast(responseElement); + } + + @Test + public void testInstantRegisterTopic() throws Exception { + KsqlResource testResource = TestKsqlResourceUtil.get(); + + final String ksqlTopic = "FOO"; + final String kafkaTopic = "bar"; + final String format = "json"; + + final String ksqlString = + String.format("REGISTER TOPIC %s WITH (kafka_topic='%s', value_format='%s');", + ksqlTopic, + kafkaTopic, format); + + final Map createTopicProperties = new HashMap<>(); + createTopicProperties.put(DdlConfig.KAFKA_TOPIC_NAME_PROPERTY, new StringLiteral(kafkaTopic)); + createTopicProperties.put(DdlConfig.VALUE_FORMAT_PROPERTY, new StringLiteral(format)); + + final RegisterTopic ksqlStatement = new RegisterTopic( + QualifiedName.of(ksqlTopic), + false, + createTopicProperties + ); + + final CommandId commandId = new CommandId(CommandId.Type.TOPIC, ksqlTopic); + final CommandStatus commandStatus = new CommandStatus( + CommandStatus.Status.QUEUED, + "Statement written to command topic" + ); + + final CommandStatusEntity expectedCommandStatusEntity = + new CommandStatusEntity(ksqlString, commandId, commandStatus); + + final Map streamsProperties = Collections.emptyMap(); + + KsqlEntity testKsqlEntity = makeSingleRequest( + testResource, + ksqlString, + ksqlStatement, + streamsProperties, + KsqlEntity.class + ); + + assertEquals(expectedCommandStatusEntity, testKsqlEntity); + } + + @Test + public void testErroneousStatement() throws Exception { + KsqlResource testResource = TestKsqlResourceUtil.get(); + final String ksqlString = "DESCRIBE nonexistent_table;"; + final ShowColumns ksqlStatement = new ShowColumns(QualifiedName.of("nonexistent_table"), false); + + 
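+ // A DESCRIBE of a nonexistent source should surface as an ErrorMessageEntity in the response list, not as a thrown exception.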
KsqlEntity resultEntity = makeSingleRequest( + testResource, + ksqlString, + ksqlStatement, + Collections.emptyMap(), + ErrorMessageEntity.class + ); + + assertEquals(ErrorMessageEntity.class, resultEntity.getClass()); + } + + @Test + public void testListRegisteredTopics() throws Exception { + KsqlResource testResource = TestKsqlResourceUtil.get(); + final String ksqlString = "LIST REGISTERED TOPICS;"; + final ListRegisteredTopics ksqlStatement = new ListRegisteredTopics(Optional.empty()); + + KsqlTopicsList ksqlTopicsList = makeSingleRequest( + testResource, + ksqlString, + ksqlStatement, + Collections.emptyMap(), + KsqlTopicsList.class + ); + + Collection testTopics = ksqlTopicsList.getTopics(); + Collection expectedTopics = testResource.getKsqlEngine().getMetaStore() + .getAllKsqlTopics().values().stream() + .map(KsqlTopicInfo::new) + .collect(Collectors.toList()); + + assertEquals(expectedTopics.size(), testTopics.size()); + + for (KsqlTopicInfo testTopic : testTopics) { + assertTrue(expectedTopics.contains(testTopic)); + } + + for (KsqlTopicInfo expectedTopic : expectedTopics) { + assertTrue(testTopics.contains(expectedTopic)); + } + } + + @Test + public void testShowQueries() throws Exception { + KsqlResource testResource = TestKsqlResourceUtil.get(); + final String ksqlString = "SHOW QUERIES;"; + final ListQueries ksqlStatement = new ListQueries(Optional.empty()); + final String testKafkaTopic = "lol"; + + final String testQueryStatement = String.format( + "CREATE STREAM %s AS SELECT * FROM test_stream WHERE S2_F2 > 69;", + testKafkaTopic + ); + + Queries queries = makeSingleRequest( + testResource, + ksqlString, + ksqlStatement, + Collections.emptyMap(), + Queries.class + ); + List testQueries = queries.getQueries(); + + assertEquals(0, testQueries.size()); + } + + @Test + public void testDescribeStatement() throws Exception { + KsqlResource testResource = TestKsqlResourceUtil.get(); + final String tableName = "TEST_TABLE"; + final String ksqlString = String.format("DESCRIBE %s;", tableName); + final ShowColumns ksqlStatement = new ShowColumns(QualifiedName.of(tableName), false); + + SourceDescription testDescription = makeSingleRequest( + testResource, + ksqlString, + ksqlStatement, + Collections.emptyMap(), + SourceDescription.class + ); + + SourceDescription expectedDescription = + new SourceDescription(ksqlString, testResource.getKsqlEngine().getMetaStore().getSource(tableName)); + + assertEquals(expectedDescription, testDescription); + } + + @Test + public void testListStreamsStatement() throws Exception { + KsqlResource testResource = TestKsqlResourceUtil.get(); + final String ksqlString = "LIST STREAMS;"; + final ListStreams ksqlStatement = new ListStreams(Optional.empty()); + + StreamsList streamsList = makeSingleRequest( + testResource, + ksqlString, + ksqlStatement, + Collections.emptyMap(), + StreamsList.class + ); + + List testStreams = streamsList.getStreams(); + assertEquals(1, testStreams.size()); + + StreamsList.StreamInfo expectedStream = + new StreamsList.StreamInfo((KsqlStream) testResource.getKsqlEngine().getMetaStore().getSource("TEST_STREAM")); + + assertEquals(expectedStream, testStreams.get(0)); + } + + @Test + public void testListTablesStatement() throws Exception { + KsqlResource testResource = TestKsqlResourceUtil.get(); + final String ksqlString = "LIST TABLES;"; + final ListTables ksqlStatement = new ListTables(Optional.empty()); + + TablesList tablesList = makeSingleRequest( + testResource, + ksqlString, + ksqlStatement, + Collections.emptyMap(), + 
+    List<TablesList.TableInfo> testTables = tablesList.getTables();
+    assertEquals(1, testTables.size());
+
+    TablesList.TableInfo expectedTable =
+        new TablesList.TableInfo((KsqlTable) testResource.getKsqlEngine().getMetaStore().getSource("TEST_TABLE"));
+
+    assertEquals(expectedTable, testTables.get(0));
+  }
+}
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/StatusResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/StatusResourceTest.java
new file mode 100644
index 000000000000..df7d62e0859a
--- /dev/null
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/StatusResourceTest.java
@@ -0,0 +1,89 @@
+package io.confluent.ksql.rest.server.resources;
+
+import io.confluent.ksql.rest.entity.CommandStatus;
+import io.confluent.ksql.rest.entity.CommandStatuses;
+import io.confluent.ksql.rest.server.computation.CommandId;
+import io.confluent.ksql.rest.server.computation.StatementExecutor;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.mock;
+import static org.easymock.EasyMock.replay;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+
+public class StatusResourceTest {
+
+  private static final Map<CommandId, CommandStatus> mockCommandStatuses;
+
+  static {
+    mockCommandStatuses = new HashMap<>();
+
+    mockCommandStatuses.put(
+        new CommandId(CommandId.Type.TOPIC, "test_topic"),
+        new CommandStatus(CommandStatus.Status.SUCCESS, "Topic created successfully")
+    );
+
+    mockCommandStatuses.put(
+        new CommandId(CommandId.Type.STREAM, "test_stream"),
+        new CommandStatus(CommandStatus.Status.ERROR, "Hi Ewen!")
+    );
+
+    mockCommandStatuses.put(
+        new CommandId(CommandId.Type.TERMINATE, "5"),
+        new CommandStatus(CommandStatus.Status.QUEUED, "Command written to command topic")
+    );
+  }
+
+  private StatusResource getTestStatusResource() {
+    StatementExecutor mockStatementExecutor = mock(StatementExecutor.class);
+
+    expect(mockStatementExecutor.getStatuses()).andReturn(mockCommandStatuses);
+
+    for (Map.Entry<CommandId, CommandStatus> commandEntry : mockCommandStatuses.entrySet()) {
+      expect(mockStatementExecutor.getStatus(commandEntry.getKey())).andReturn(Optional.of(commandEntry.getValue()));
+    }
+
+    expect(mockStatementExecutor.getStatus(anyObject(CommandId.class))).andReturn(Optional.empty());
+
+    replay(mockStatementExecutor);
+
+    return new StatusResource(mockStatementExecutor);
+  }
+
+  @Test
+  public void testGetAllStatuses() {
+    StatusResource testResource = getTestStatusResource();
+
+    Object statusesEntity = testResource.getAllStatuses().getEntity();
+    assertThat(statusesEntity, instanceOf(CommandStatuses.class));
+    CommandStatuses testCommandStatuses = (CommandStatuses) statusesEntity;
+
+    Map expectedCommandStatuses =
+        CommandStatuses.fromFullStatuses(mockCommandStatuses);
+
+    assertEquals(expectedCommandStatuses, testCommandStatuses);
+  }
+
+  @Test
+  public void testGetStatus() throws Exception {
+    StatusResource testResource = getTestStatusResource();
+
+    for (Map.Entry<CommandId, CommandStatus> commandEntry : mockCommandStatuses.entrySet()) {
+      CommandId commandId = commandEntry.getKey();
+      CommandStatus expectedCommandStatus = commandEntry.getValue();
+
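+      // The resource receives the CommandId split into its type name and entity strings,
+      // and should look the corresponding status back up from the statement executor.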
+      Object statusEntity = testResource.getStatus(commandId.getType().name(), commandId.getEntity()).getEntity();
+      assertThat(statusEntity, instanceOf(CommandStatus.class));
+      CommandStatus testCommandStatus = (CommandStatus) statusEntity;
+
+      assertEquals(expectedCommandStatus, testCommandStatus);
+    }
+  }
+}
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/StreamedQueryResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/StreamedQueryResourceTest.java
new file mode 100644
index 000000000000..8d2db36465da
--- /dev/null
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/StreamedQueryResourceTest.java
@@ -0,0 +1,221 @@
+package io.confluent.ksql.rest.server.resources;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.confluent.ksql.KsqlEngine;
+import io.confluent.ksql.metastore.DataSource;
+import io.confluent.ksql.parser.tree.Query;
+import io.confluent.ksql.physical.GenericRow;
+import io.confluent.ksql.planner.plan.OutputNode;
+import io.confluent.ksql.rest.entity.KsqlRequest;
+import io.confluent.ksql.rest.entity.StreamedRow;
+import io.confluent.ksql.rest.server.StatementParser;
+import io.confluent.ksql.rest.server.resources.streaming.StreamedQueryResource;
+import io.confluent.ksql.util.QueuedQueryMetadata;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.KeyValue;
+import org.junit.Test;
+
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.StreamingOutput;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.PipedInputStream;
+import java.io.PipedOutputStream;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Scanner;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.mock;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+
+public class StreamedQueryResourceTest {
+
+  @Test
+  public void testStreamQuery() throws Throwable {
+    final AtomicReference<Throwable> threadException = new AtomicReference<>(null);
+    final Thread.UncaughtExceptionHandler threadExceptionHandler =
+        (thread, exception) -> threadException.compareAndSet(null, exception);
+
+    final String queryString = "SELECT * FROM test_stream;";
+
+    final SynchronousQueue<KeyValue<String, GenericRow>> rowQueue = new SynchronousQueue<>();
+
+    final LinkedList<GenericRow> writtenRows = new LinkedList<>();
+
+    final Thread rowQueuePopulatorThread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          for (int i = 0; ; i++) {
+            String key = Integer.toString(i);
+            GenericRow value = new GenericRow(Collections.singletonList(i));
+            synchronized (writtenRows) {
+              writtenRows.add(value);
+            }
+            rowQueue.put(new KeyValue<>(key, value));
+          }
+        } catch (InterruptedException exception) {
+          // This should happen during the test, so it's fine
+        }
+      }
+    }, "Row Queue Populator");
+    rowQueuePopulatorThread.setUncaughtExceptionHandler(threadExceptionHandler);
+    rowQueuePopulatorThread.start();
+
+    final KafkaStreams mockKafkaStreams = mock(KafkaStreams.class);
+    mockKafkaStreams.start();
+    expectLastCall();
+    mockKafkaStreams.setUncaughtExceptionHandler(anyObject(Thread.UncaughtExceptionHandler.class));
+    expectLastCall();
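+    // The remaining expectations capture the shutdown sequence the resource is expected to
+    // perform on the streams instance: close with a timeout, then cleanUp.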
+    expect(mockKafkaStreams.close(100L, TimeUnit.MILLISECONDS)).andReturn(true);
+    mockKafkaStreams.cleanUp();
+    expectLastCall();
+
+    final OutputNode mockOutputNode = mock(OutputNode.class);
+    expect(mockOutputNode.getSchema())
+        .andReturn(SchemaBuilder.struct().field("f1", SchemaBuilder.INT32_SCHEMA));
+
+    final QueuedQueryMetadata queuedQueryMetadata =
+        new QueuedQueryMetadata(queryString, mockKafkaStreams, mockOutputNode, "",
+            rowQueue, DataSource.DataSourceType.KSTREAM);
+
+    final Map<String, Object> requestStreamsProperties = Collections.emptyMap();
+
+    KsqlEngine mockKsqlEngine = mock(KsqlEngine.class);
+    expect(mockKsqlEngine.buildMultipleQueries(true, queryString, requestStreamsProperties))
+        .andReturn(Collections.singletonList(queuedQueryMetadata));
+
+    StatementParser mockStatementParser = mock(StatementParser.class);
+    expect(mockStatementParser.parseSingleStatement(queryString)).andReturn(mock(Query.class));
+
+    replay(mockKsqlEngine, mockStatementParser, mockKafkaStreams, mockOutputNode);
+
+    StreamedQueryResource testResource = new StreamedQueryResource(mockKsqlEngine, mockStatementParser, 1000);
+
+    Response response =
+        testResource.streamQuery(new KsqlRequest(queryString, requestStreamsProperties));
+    PipedOutputStream responseOutputStream = new EOFPipedOutputStream();
+    PipedInputStream responseInputStream = new PipedInputStream(responseOutputStream, 1);
+    StreamingOutput responseStream = (StreamingOutput) response.getEntity();
+
+    final Thread queryWriterThread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          responseStream.write(responseOutputStream);
+        } catch (EOFException exception) {
+          // It's fine
+        } catch (IOException exception) {
+          throw new RuntimeException(exception);
+        }
+      }
+    }, "Query Writer");
+    queryWriterThread.setUncaughtExceptionHandler(threadExceptionHandler);
+    queryWriterThread.start();
+
+    Scanner responseScanner = new Scanner(responseInputStream);
+    ObjectMapper objectMapper = new ObjectMapper();
+    for (int i = 0; i < 5; i++) {
+      if (!responseScanner.hasNextLine()) {
+        throw new Exception("Response input stream failed to have expected line available");
+      }
+      String responseLine = responseScanner.nextLine();
+      if (responseLine.trim().isEmpty()) {
+        i--;
+      } else {
+        GenericRow expectedRow;
+        synchronized (writtenRows) {
+          expectedRow = writtenRows.poll();
+        }
+        GenericRow testRow = objectMapper.readValue(responseLine, StreamedRow.class).getRow();
+        assertEquals(expectedRow, testRow);
+      }
+    }
+
+    responseOutputStream.close();
+
+    queryWriterThread.join();
+    rowQueuePopulatorThread.interrupt();
+    rowQueuePopulatorThread.join();
+
+    // Definitely want to make sure that the Kafka Streams instance has been closed and cleaned up
+    verify(mockKafkaStreams);
+
+    // If one of the other threads has somehow managed to throw an exception without breaking things up until this
+    // point, we throw that exception now in the main thread and cause the test to fail
+    Throwable exception = threadException.get();
+    if (exception != null) {
+      throw exception;
+    }
+  }
+
+  // Have to mimic the behavior of the OutputStream that's usually passed to the QueryStreamWriter class's write()
+  // method, which is to throw an EOFException if any write attempts are made after the connection has terminated
+  private static class EOFPipedOutputStream extends PipedOutputStream {
+
+    private boolean closed;
+
+    public EOFPipedOutputStream() {
+      super();
+      closed = false;
+    }
+
+    private void throwIfClosed() throws IOException {
+      if (closed) {
+        throw new EOFException();
+      }
+    }
+
+    @Override
+    public void close() throws IOException {
+      closed = true;
+      super.close();
+    }
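+
+    // flush() and the write() overloads below re-check the closed flag after delegating,
+    // since the pipe can be closed concurrently by the reading end mid-call.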
+    @Override
+    public void flush() throws IOException {
+      throwIfClosed();
+      try {
+        super.flush();
+      } catch (IOException exception) {
+        // Might have been closed during the call to super.flush();
+        throwIfClosed();
+        throw exception;
+      }
+    }
+
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+      throwIfClosed();
+      try {
+        super.write(b, off, len);
+      } catch (IOException exception) {
+        // Might have been closed during the call to super.write();
+        throwIfClosed();
+        throw exception;
+      }
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      throwIfClosed();
+      try {
+        super.write(b);
+      } catch (IOException exception) {
+        // Might have been closed during the call to super.write();
+        throwIfClosed();
+        throw exception;
+      }
+    }
+  }
+}
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/utils/TestUtils.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/utils/TestUtils.java
new file mode 100644
index 000000000000..208631eb427b
--- /dev/null
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/utils/TestUtils.java
@@ -0,0 +1,62 @@
+package io.confluent.ksql.rest.server.utils;
+
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import io.confluent.ksql.rest.server.computation.Command;
+import io.confluent.ksql.rest.server.computation.CommandId;
+import io.confluent.ksql.util.KsqlConfig;
+import io.confluent.ksql.util.Pair;
+
+public class TestUtils {
+
+  public static KsqlConfig getMockKsqlConfig() {
+    Map<String, Object> props = new HashMap<>();
+    props.put("application.id", "ksqlStatementExecutorTest");
+    props.put("bootstrap.servers", "localhost:9092");
+    return new KsqlConfig(props);
+  }
+
+  public List<Pair<CommandId, Command>> getAllPriorCommandRecords() {
+    List<Pair<CommandId, Command>> priorCommands = new ArrayList<>();
+
+    Command topicCommand = new Command("REGISTER TOPIC pageview_topic WITH "
+        + "(value_format = 'json', "
+        + "kafka_topic='pageview_topic_json');", new HashMap<>());
+    CommandId topicCommandId = new CommandId(CommandId.Type.TOPIC, "_CSASTopicGen");
+    priorCommands.add(new Pair<>(topicCommandId, topicCommand));
+
+    Command csCommand = new Command("CREATE STREAM pageview "
+        + "(viewtime bigint, pageid varchar, userid varchar) "
+        + "WITH (registered_topic = 'pageview_topic');",
+        new HashMap<>());
+    CommandId csCommandId = new CommandId(CommandId.Type.STREAM, "_CSASStreamGen");
+    priorCommands.add(new Pair<>(csCommandId, csCommand));
+
+    Command csasCommand = new Command("CREATE STREAM user1pv "
+        + " AS select * from pageview WHERE userid = 'user1';",
+        new HashMap<>());
+    CommandId csasCommandId = new CommandId(CommandId.Type.STREAM, "_CSASGen");
+    priorCommands.add(new Pair<>(csasCommandId, csasCommand));
+
+    Command ctasCommand = new Command("CREATE TABLE user1pvtb "
+        + " AS select * from pageview window tumbling(size 5 "
+        + "second) WHERE userid = "
+        + "'user1' group by pageid;",
+        new HashMap<>());
+    CommandId ctasCommandId = new CommandId(CommandId.Type.TABLE, "_CTASGen");
+    priorCommands.add(new Pair<>(ctasCommandId, ctasCommand));
+
+    return priorCommands;
+  }
+}
diff --git a/licenses/LICENSE-annotations-3.0.1.txt b/licenses/LICENSE-annotations-3.0.1.txt
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/licenses/LICENSE-annotations-3.0.1.txt
@@ -0,0 +1,202 @@
+ + Apache License + Version 2.0,
January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
diff --git a/licenses/LICENSE-antlr4-runtime-4.7.txt b/licenses/LICENSE-antlr4-runtime-4.7.txt
new file mode 100644
index 000000000000..77d3c02eac2b
--- /dev/null
+++ b/licenses/LICENSE-antlr4-runtime-4.7.txt
@@ -0,0 +1,148 @@
+ANTLR 4 License
+
+[The BSD License]
+Copyright (c) 2012 Terence Parr and Sam Harwell
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+  * Redistributions of source code must retain the above copyright notice, this
+    list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+  * Neither the name of the author nor the names of its contributors may be used
+    to endorse or promote products derived from this software without specific
+    prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Developer's Certificate of Origin
+
+All contributors to ANTLR v4 must formally agree to abide by the certificate
+of origin by signing on the bottom of that document. To contribute:
+
+  * fork the ANTLR v4 github repository
+  * make your changes
+  * [first time contributors]: sign contributors.txt by adding your github
+    userid, full name, email address (you can obscure your e-mail, but it
+    must be computable by human), and date.
+  * commit your changes
+  * send a pull request
+
+After you have signed once, you don't have to sign future pull
+requests. We can merge by simply checking to see your name is in the
+contributors file.
+ + + + + diff --git a/licenses/LICENSE-avro-1.8.1.txt b/licenses/LICENSE-avro-1.8.1.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-avro-1.8.1.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-avro-random-generator-0.1-20170531.181813-9.txt b/licenses/LICENSE-avro-random-generator-0.1-20170531.181813-9.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-avro-random-generator-0.1-20170531.181813-9.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-commons-collections4-4.0.txt b/licenses/LICENSE-commons-collections4-4.0.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-commons-collections4-4.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-commons-compress-1.8.1.txt b/licenses/LICENSE-commons-compress-1.8.1.txt new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/licenses/LICENSE-commons-compress-1.8.1.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/licenses/LICENSE-commons-csv-1.4.txt b/licenses/LICENSE-commons-csv-1.4.txt new file mode 100644 index 000000000000..75b52484ea47 --- /dev/null +++ b/licenses/LICENSE-commons-csv-1.4.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-commons-lang3-3.3.2.txt b/licenses/LICENSE-commons-lang3-3.3.2.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-commons-lang3-3.3.2.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-generex-1.0.1.txt b/licenses/LICENSE-generex-1.0.1.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-generex-1.0.1.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-jackson-annotations-2.8.0.txt b/licenses/LICENSE-jackson-annotations-2.8.0.txt new file mode 100644 index 000000000000..ff94ef8c456a --- /dev/null +++ b/licenses/LICENSE-jackson-annotations-2.8.0.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor annotations is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/licenses/LICENSE-jackson-core-2.8.4.txt b/licenses/LICENSE-jackson-core-2.8.4.txt new file mode 100644 index 000000000000..f5f45d26a49d --- /dev/null +++ b/licenses/LICENSE-jackson-core-2.8.4.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). 
+See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/licenses/LICENSE-jackson-core-2.8.5.txt b/licenses/LICENSE-jackson-core-2.8.5.txt new file mode 100644 index 000000000000..f5f45d26a49d --- /dev/null +++ b/licenses/LICENSE-jackson-core-2.8.5.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/licenses/LICENSE-jackson-core-asl-1.9.13.txt b/licenses/LICENSE-jackson-core-asl-1.9.13.txt new file mode 100644 index 000000000000..3eaf591bec2e --- /dev/null +++ b/licenses/LICENSE-jackson-core-asl-1.9.13.txt @@ -0,0 +1,13 @@ +This copy of Jackson JSON processor is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/ + +A copy is also included with both the the downloadable source code package +and jar that contains class bytecodes, as file "ASL 2.0". In both cases, +that file should be located next to this file: in source distribution +the location should be "release-notes/asl"; and in jar "META-INF/" diff --git a/licenses/LICENSE-jackson-databind-2.8.4.txt b/licenses/LICENSE-jackson-databind-2.8.4.txt new file mode 100644 index 000000000000..6acf75483f9b --- /dev/null +++ b/licenses/LICENSE-jackson-databind-2.8.4.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor databind module is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/licenses/LICENSE-jackson-databind-2.8.5.txt b/licenses/LICENSE-jackson-databind-2.8.5.txt new file mode 100644 index 000000000000..6acf75483f9b --- /dev/null +++ b/licenses/LICENSE-jackson-databind-2.8.5.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor databind module is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/licenses/LICENSE-jackson-mapper-asl-1.9.13.txt b/licenses/LICENSE-jackson-mapper-asl-1.9.13.txt new file mode 100644 index 000000000000..3eaf591bec2e --- /dev/null +++ b/licenses/LICENSE-jackson-mapper-asl-1.9.13.txt @@ -0,0 +1,13 @@ +This copy of Jackson JSON processor is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/ + +A copy is also included with both the the downloadable source code package +and jar that contains class bytecodes, as file "ASL 2.0". 
In both cases, + that file should be located next to this file: in source distribution + the location should be "release-notes/asl"; and in jar "META-INF/" diff --git a/licenses/LICENSE-jline-3.3.1.txt b/licenses/LICENSE-jline-3.3.1.txt new file mode 100644 index 000000000000..4e51621759fe --- /dev/null +++ b/licenses/LICENSE-jline-3.3.1.txt @@ -0,0 +1,7 @@ +<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
+<html><head>
+<title>301 Moved Permanently</title>
+</head><body>
+<h1>Moved Permanently</h1>
+<p>The document has moved <a href="...">here</a>.</p>
+</body></html>
+ diff --git a/licenses/LICENSE-jol-core-0.2.txt b/licenses/LICENSE-jol-core-0.2.txt new file mode 100644 index 000000000000..b40a0f457d75 --- /dev/null +++ b/licenses/LICENSE-jol-core-0.2.txt @@ -0,0 +1,347 @@ +The GNU General Public License (GPL) + +Version 2, June 1991 + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. +59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Everyone is permitted to copy and distribute verbatim copies of this license +document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to share +and change it. By contrast, the GNU General Public License is intended to +guarantee your freedom to share and change free software--to make sure the +software is free for all its users. This General Public License applies to +most of the Free Software Foundation's software and to any other program whose +authors commit to using it. (Some other Free Software Foundation software is +covered by the GNU Library General Public License instead.) You can apply it to +your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our +General Public Licenses are designed to make sure that you have the freedom to +distribute copies of free software (and charge for this service if you wish), +that you receive source code or can get it if you want it, that you can change +the software or use pieces of it in new free programs; and that you know you +can do these things. + +To protect your rights, we need to make restrictions that forbid anyone to deny +you these rights or to ask you to surrender the rights. These restrictions +translate to certain responsibilities for you if you distribute copies of the +software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for +a fee, you must give the recipients all the rights that you have. You must +make sure that they, too, receive or can get the source code. And you must +show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) +offer you this license which gives you legal permission to copy, distribute +and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that +everyone understands that there is no warranty for this free software. If the +software is modified by someone else and passed on, we want its recipients to +know that what they have is not the original, so that any problems introduced +by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We +wish to avoid the danger that redistributors of a free program will +individually obtain patent licenses, in effect making the program proprietary. +To prevent this, we have made it clear that any patent must be licensed for +everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification +follow. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a notice +placed by the copyright holder saying it may be distributed under the terms of +this General Public License. 
The "Program", below, refers to any such program +or work, and a "work based on the Program" means either the Program or any +derivative work under copyright law: that is to say, a work containing the +Program or a portion of it, either verbatim or with modifications and/or +translated into another language. (Hereinafter, translation is included +without limitation in the term "modification".) Each licensee is addressed as +"you". + +Activities other than copying, distribution and modification are not covered by +this License; they are outside its scope. The act of running the Program is +not restricted, and the output from the Program is covered only if its contents +constitute a work based on the Program (independent of having been made by +running the Program). Whether that is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's source code as +you receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice and +disclaimer of warranty; keep intact all the notices that refer to this License +and to the absence of any warranty; and give any other recipients of the +Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and you may +at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of it, thus +forming a work based on the Program, and copy and distribute such modifications +or work under the terms of Section 1 above, provided that you also meet all of +these conditions: + + a) You must cause the modified files to carry prominent notices stating + that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in whole or + in part contains or is derived from the Program or any part thereof, to be + licensed as a whole at no charge to all third parties under the terms of + this License. + + c) If the modified program normally reads commands interactively when run, + you must cause it, when started running for such interactive use in the + most ordinary way, to print or display an announcement including an + appropriate copyright notice and a notice that there is no warranty (or + else, saying that you provide a warranty) and that users may redistribute + the program under these conditions, and telling the user how to view a copy + of this License. (Exception: if the Program itself is interactive but does + not normally print such an announcement, your work based on the Program is + not required to print an announcement.) + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Program, and can be reasonably +considered independent and separate works in themselves, then this License, and +its terms, do not apply to those sections when you distribute them as separate +works. But when you distribute the same sections as part of a whole which is a +work based on the Program, the distribution of the whole must be on the terms +of this License, whose permissions for other licensees extend to the entire +whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise the +right to control the distribution of derivative or collective works based on +the Program. + +In addition, mere aggregation of another work not based on the Program with the +Program (or with a work based on the Program) on a volume of a storage or +distribution medium does not bring the other work under the scope of this +License. + +3. You may copy and distribute the Program (or a work based on it, under +Section 2) in object code or executable form under the terms of Sections 1 and +2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable source + code, which must be distributed under the terms of Sections 1 and 2 above + on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three years, to + give any third party, for a charge no more than your cost of physically + performing source distribution, a complete machine-readable copy of the + corresponding source code, to be distributed under the terms of Sections 1 + and 2 above on a medium customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer to + distribute corresponding source code. (This alternative is allowed only + for noncommercial distribution and only if you received the program in + object code or executable form with such an offer, in accord with + Subsection b above.) + +The source code for a work means the preferred form of the work for making +modifications to it. For an executable work, complete source code means all +the source code for all modules it contains, plus any associated interface +definition files, plus the scripts used to control compilation and installation +of the executable. However, as a special exception, the source code +distributed need not include anything that is normally distributed (in either +source or binary form) with the major components (compiler, kernel, and so on) +of the operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the source +code from the same place counts as distribution of the source code, even though +third parties are not compelled to copy the source along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program except as +expressly provided under this License. Any attempt otherwise to copy, modify, +sublicense or distribute the Program is void, and will automatically terminate +your rights under this License. However, parties who have received copies, or +rights, from you under this License will not have their licenses terminated so +long as such parties remain in full compliance. + +5. You are not required to accept this License, since you have not signed it. +However, nothing else grants you permission to modify or distribute the Program +or its derivative works. These actions are prohibited by law if you do not +accept this License. Therefore, by modifying or distributing the Program (or +any work based on the Program), you indicate your acceptance of this License to +do so, and all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + +6. 
Each time you redistribute the Program (or any work based on the Program), +the recipient automatically receives a license from the original licensor to +copy, distribute or modify the Program subject to these terms and conditions. +You may not impose any further restrictions on the recipients' exercise of the +rights granted herein. You are not responsible for enforcing compliance by +third parties to this License. + +7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), conditions +are imposed on you (whether by court order, agreement or otherwise) that +contradict the conditions of this License, they do not excuse you from the +conditions of this License. If you cannot distribute so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not distribute the Program at all. +For example, if a patent license would not permit royalty-free redistribution +of the Program by all those who receive copies directly or indirectly through +you, then the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply and +the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents or +other property right claims or to contest validity of any such claims; this +section has the sole purpose of protecting the integrity of the free software +distribution system, which is implemented by public license practices. Many +people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose that +choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in certain +countries either by patents or by copyrighted interfaces, the original +copyright holder who places the Program under this License may add an explicit +geographical distribution limitation excluding those countries, so that +distribution is permitted only in or among countries not thus excluded. In +such case, this License incorporates the limitation as if written in the body +of this License. + +9. The Free Software Foundation may publish revised and/or new versions of the +General Public License from time to time. Such new versions will be similar in +spirit to the present version, but may differ in detail to address new problems +or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any later +version", you have the option of following the terms and conditions either of +that version or of any later version published by the Free Software Foundation. +If the Program does not specify a version number of this License, you may +choose any version ever published by the Free Software Foundation. + +10. 
If you wish to incorporate parts of the Program into other free programs +whose distribution conditions are different, write to the author to ask for +permission. For software which is copyrighted by the Free Software Foundation, +write to the Free Software Foundation; we sometimes make exceptions for this. +Our decision will be guided by the two goals of preserving the free status of +all derivatives of our free software and of promoting the sharing and reuse of +software generally. + +NO WARRANTY + +11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR +THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE +STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE +PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, +YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL +ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE +PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR +INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA +BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER +OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest possible +use to the public, the best way to achieve this is to make it free software +which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach +them to the start of each source file to most effectively convey the exclusion +of warranty; and each file should have at least the "copyright" line and a +pointer to where the full notice is found. + + One line to give the program's name and a brief idea of what it does. + + Copyright (C) + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this when it +starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author Gnomovision comes + with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free + software, and you are welcome to redistribute it under certain conditions; + type 'show c' for details. 
+ +The hypothetical commands 'show w' and 'show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may be +called something other than 'show w' and 'show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your school, +if any, to sign a "copyright disclaimer" for the program, if necessary. Here +is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + 'Gnomovision' (which makes passes at compilers) written by James Hacker. + + signature of Ty Coon, 1 April 1989 + + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General Public +License instead of this License. + + +"CLASSPATH" EXCEPTION TO THE GPL + +Certain source files distributed by Oracle America and/or its affiliates are +subject to the following clarification and special exception to the GPL, but +only where Oracle has expressly included in the particular source file's header +the words "Oracle designates this particular file as subject to the "Classpath" +exception as provided by Oracle in the LICENSE file that accompanied this code." + + Linking this library statically or dynamically with other modules is making + a combined work based on this library. Thus, the terms and conditions of + the GNU General Public License cover the whole combination. + + As a special exception, the copyright holders of this library give you + permission to link this library with independent modules to produce an + executable, regardless of the license terms of these independent modules, + and to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent module, + the terms and conditions of the license of that module. An independent + module is a module which is not derived from or based on this library. If + you modify this library, you may extend this exception to your version of + the library, but you are not obligated to do so. If you do not wish to do + so, delete this exception statement from your version. diff --git a/licenses/LICENSE-jopt-simple-5.0.4.txt b/licenses/LICENSE-jopt-simple-5.0.4.txt new file mode 100644 index 000000000000..73fb1528f1e2 --- /dev/null +++ b/licenses/LICENSE-jopt-simple-5.0.4.txt @@ -0,0 +1,7 @@ + + +301 Moved Permanently + +

+Moved Permanently
+
+The document has moved here.
+ diff --git a/licenses/LICENSE-jsr305-3.0.2.txt b/licenses/LICENSE-jsr305-3.0.2.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-jsr305-3.0.2.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-log4j-1.2.17.txt b/licenses/LICENSE-log4j-1.2.17.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-log4j-1.2.17.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-netty-3.7.0.Final.txt b/licenses/LICENSE-netty-3.7.0.Final.txt new file mode 100644 index 000000000000..5a7c2636bc42 --- /dev/null +++ b/licenses/LICENSE-netty-3.7.0.Final.txt @@ -0,0 +1,421 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Apache License, Version 2.0 + + + + + + + + + + + +
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+
+  1. You must give any other recipients of the Work or Derivative Works a copy of this License; and
+
+  2. You must cause any modified files to carry prominent notices stating that You changed the files; and
+
+  3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+
+  4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ + + + + + + + + + + diff --git a/licenses/LICENSE-snappy-java-1.1.1.3.txt b/licenses/LICENSE-snappy-java-1.1.1.3.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-snappy-java-1.1.1.3.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-snappy-java-1.1.4.txt b/licenses/LICENSE-snappy-java-1.1.4.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-snappy-java-1.1.4.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-zookeeper-3.4.8.txt b/licenses/LICENSE-zookeeper-3.4.8.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/licenses/LICENSE-zookeeper-3.4.8.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/licenses.html b/licenses/licenses.html new file mode 100644 index 000000000000..d0ac6a63f418 --- /dev/null +++ b/licenses/licenses.html @@ -0,0 +1,129 @@ + +

License Report

| Artifact | Type | Version | License(s) |
|----------|------|---------|------------|
| jackson-annotations-2.8.0 | jar | 2.8.0 | Apache 2.0 |
| jackson-core-2.8.5 | jar | 2.8.5 | Apache 2.0 |
| jackson-databind-2.8.5 | jar | 2.8.5 | Apache 2.0 |
| generex-1.0.2 | jar | 1.0.2 | Apache 2.0 |
| jsr305-3.0.2 | jar | 3.0.2 | Apache 2.0 |
| avro-1.8.1 | jar | 1.8.1 | Apache 2.0 |
| paranamer-2.7 | jar | 2.7 | LICENSE.txt |
| metrics-core-2.2.0 | jar | 2.2.0 | |
| slice-0.29 | jar | 0.29 | |
| common-config-3.3.0 | jar | 3.3.0 | |
| common-utils-3.3.0 | jar | 3.3.0 | |
| kafka-avro-serializer-3.3.0 | jar | 3.3.0 | |
| kafka-connect-avro-converter-3.3.0 | jar | 3.3.0 | |
| kafka-schema-registry-client-3.3.0 | jar | 3.3.0 | |
| ksql-core-0.1-SNAPSHOT | jar | 0.1-SNAPSHOT | |
| ksql-examples-0.1-SNAPSHOT | jar | 0.1-SNAPSHOT | |
| netty-3.7.0.Final | jar | 3.7.0.Final | Apache 2.0 |
| jline-0.9.94 | jar | 0.9.94 | BSD |
| log4j-1.2.17 | jar | 1.2.17 | Apache 2.0 |
| jopt-simple-5.0.3 | jar | 5.0.3 | The MIT License |
| antlr4-runtime-4.7 | jar | 4.7 | link from artifact (META-INF/MANIFEST.MF) |
| commons-compress-1.8.1 | jar | 1.8.1 | Apache 2.0 |
| commons-csv-1.4 | jar | 1.4 | Apache 2.0 |
| commons-compiler-3.0.7 | jar | 3.0.7 | |
| janino-3.0.7 | jar | 3.0.7 | |
| jol-core-0.2 | jar | 0.2 | included file |
| slf4j-api-1.7.21 | jar | 1.7.21 | |
| slf4j-log4j12-1.7.21 | jar | 1.7.21 | |
| automaton-1.11-8 | jar | | |
| connect-api-0.11.0.0-cp1 | jar | | included file |
| connect-json-0.11.0.0-cp1 | jar | | included file |
| hamcrest-core-1.3 | jar | | included file |
| jackson-core-asl-1.9.13 | jar | 1.9.13 | Apache 2.0 |
| jackson-mapper-asl-1.9.13 | jar | 1.9.13 | Apache 2.0 |
| junit-4.12 | jar | | included file |
| kafka-clients-0.11.0.0-cp1 | jar | | included file |
| kafka-streams-0.11.0.0-cp1 | jar | | included file |
| kafka_2.11-0.11.0.0-cp1 | jar | | included file |
| lz4-1.3.0 | jar | 1.3.0 | |
| rocksdbjni-5.0.1 | jar | | |
| scala-library-2.11.11 | jar | 2.11.11.v20170413-090219-8a413ba7cc | |
| scala-parser-combinators_2.11-1.0.4 | jar | 1.0.4 | |
| snappy-java-1.1.1.3 | jar | 1.1.1.3 | Apache 2.0 |
| xz-1.5 | jar | 1.5 | |
| zkclient-0.10 | jar | 0.10 | |
| zookeeper-3.4.8 | jar | 3.4.8 | Apache 2.0 |
+ diff --git a/notices/NOTICE-avro-1.8.1.txt b/notices/NOTICE-avro-1.8.1.txt new file mode 100644 index 000000000000..f868e810c14b --- /dev/null +++ b/notices/NOTICE-avro-1.8.1.txt @@ -0,0 +1,8 @@ + +Apache Avro +Copyright 2009-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + diff --git a/notices/NOTICE-jackson-core-2.8.5.txt b/notices/NOTICE-jackson-core-2.8.5.txt new file mode 100644 index 000000000000..4c976b7b4cc5 --- /dev/null +++ b/notices/NOTICE-jackson-core-2.8.5.txt @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/notices/NOTICE-jackson-core-asl-1.9.13.txt b/notices/NOTICE-jackson-core-asl-1.9.13.txt new file mode 100644 index 000000000000..0cae638a15e3 --- /dev/null +++ b/notices/NOTICE-jackson-core-asl-1.9.13.txt @@ -0,0 +1,7 @@ +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. diff --git a/notices/NOTICE-jackson-databind-2.8.5.txt b/notices/NOTICE-jackson-databind-2.8.5.txt new file mode 100644 index 000000000000..5ab1e5636037 --- /dev/null +++ b/notices/NOTICE-jackson-databind-2.8.5.txt @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/notices/NOTICE-jackson-mapper-asl-1.9.13.txt b/notices/NOTICE-jackson-mapper-asl-1.9.13.txt new file mode 100644 index 000000000000..0cae638a15e3 --- /dev/null +++ b/notices/NOTICE-jackson-mapper-asl-1.9.13.txt @@ -0,0 +1,7 @@ +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta.
+ +For additional credits (generally to people who reported problems) +see CREDITS file. diff --git a/notices/NOTICE-log4j-1.2.17.txt b/notices/NOTICE-log4j-1.2.17.txt new file mode 100644 index 000000000000..037573236004 --- /dev/null +++ b/notices/NOTICE-log4j-1.2.17.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/pom.xml b/pom.xml new file mode 100644 index 000000000000..f6894abc63e0 --- /dev/null +++ b/pom.xml @@ -0,0 +1,232 @@ + + + 4.0.0 + + + io.confluent + common + 3.3.1-SNAPSHOT + + + io.confluent.ksql + ksql-parent + pom + 0.1-SNAPSHOT + + ksql-core + ksql-cli + ksql-examples + ksql-rest-app + + + + + confluent + http://packages.confluent.io/maven/ + + + + + 0.29 + 2.2.0 + 4.7 + 1.8.1 + 1.4 + + ${project.version} + 21.0 + 1 + 3.0.7 + 1.8 + 3.3.1 + 4.4.0 + 3.0.2 + 1.5.0 + 3.3.1-SNAPSHOT + 1.2.1 + 1.3.4 + 1.0.2 + + + + + + + + io.confluent.ksql + ksql-cli + ${project.version} + + + + io.confluent.ksql + ksql-core + ${project.version} + + + + io.confluent.ksql + ksql-examples + ${project.version} + + + + io.confluent.ksql + ksql-rest-app + ${project.version} + + + + + + io.confluent + kafka-connect-avro-converter + ${confluent.version} + + + + io.confluent + kafka-json-serializer + ${confluent.version} + + + + io.confluent + rest-utils + ${confluent.version} + + + + + + com.github.rvesse + airline + ${airline.version} + + + + com.google.code.findbugs + jsr305 + ${jsr305.version} + + + + com.google.guava + guava + ${guava.version} + + + + io.airlift + slice + ${airlift.version} + + + + javax.inject + javax.inject + ${inject.version} + + + + net.java.dev.jna + jna + ${jna.version} + + + + org.antlr + antlr4-runtime + ${antlr.version} + + + + org.apache.avro + avro + ${avro.version} + + + + org.apache.commons + commons-csv + ${csv.version} + + + + org.codehaus.janino + janino + ${janino.version} + + + + org.jline + jline + ${jline.version} + + + + + + + + org.slf4j + slf4j-api + + + + org.slf4j + slf4j-log4j12 + + + + + + + + maven-assembly-plugin + + + src/assembly/package.xml + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + false + false + + + + + net.alchim31.maven + scala-maven-plugin + 3.2.1 + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + ${java.version} + ${java.version} + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + checkstyle/suppressions.xml + + + + + diff --git a/quickstart/docker-compose.yml b/quickstart/docker-compose.yml new file mode 100644 index 000000000000..f2c04702af0e --- /dev/null +++ b/quickstart/docker-compose.yml @@ -0,0 +1,131 @@ +--- +version: '2' +services: + zookeeper: + image: "confluentinc/cp-zookeeper:latest" + hostname: zookeeper + ports: + - '32181:32181' + environment: + ZOOKEEPER_CLIENT_PORT: 32181 + ZOOKEEPER_TICK_TIME: 2000 + extra_hosts: + - "moby:127.0.0.1" + + kafka: + image: "confluentinc/cp-enterprise-kafka:latest" + hostname: kafka + ports: + - '9092:9092' + - '29092:29092' + depends_on: + - zookeeper + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 + KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" + KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter + 
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:29092 + CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:32181 + CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1 + CONFLUENT_METRICS_ENABLE: 'true' + CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous' + extra_hosts: + - "moby:127.0.0.1" + + schema-registry: + image: "confluentinc/cp-schema-registry:latest" + hostname: schema-registry + depends_on: + - zookeeper + - kafka + ports: + - '8081:8081' + environment: + SCHEMA_REGISTRY_HOST_NAME: schema-registry + SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:32181 + extra_hosts: + - "moby:127.0.0.1" + + # Runs the Kafka KSQL data generator for topic called "pageviews" + ksql-datagen-pageviews: + image: "confluentinc/ksql-examples:latest" + hostname: ksql-datagen-pageviews + depends_on: + - kafka + - schema-registry + # Note: The container's `run` script will perform the same readiness checks + # for Kafka and Confluent Schema Registry, but that's ok because they complete fast. + # The reason we check for readiness here is that we can insert a sleep time + # for topic creation before we start the application. + command: "bash -c 'echo Waiting for Kafka to be ready... && \ + cub kafka-ready -b kafka:29092 1 20 && \ + echo Waiting for Confluent Schema Registry to be ready... && \ + cub sr-ready schema-registry 8081 20 && \ + echo Waiting a few seconds for topic creation to finish... && \ + sleep 2 && \ + java -jar /usr/share/java/ksql-examples/ksql-examples-0.1-SNAPSHOT-standalone.jar + quickstart=pageviews format=delimited topic=pageviews bootstrap-server=kafka:29092 maxInterval=100 iterations=1000 && \ + java -jar /usr/share/java/ksql-examples/ksql-examples-0.1-SNAPSHOT-standalone.jar + quickstart=pageviews format=delimited topic=pageviews bootstrap-server=kafka:29092 maxInterval=1000'" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + KSQL_LOG4J_OPTS: "-Dlog4j.configuration=file:/etc/ksql/log4j-rolling.properties" + STREAMS_BOOTSTRAP_SERVERS: kafka:29092 + STREAMS_SCHEMA_REGISTRY_HOST: schema-registry + STREAMS_SCHEMA_REGISTRY_PORT: 8081 + extra_hosts: + - "moby:127.0.0.1" + + # Runs the Kafka KSQL data generator for topic called "users" + ksql-datagen-users: + image: "confluentinc/ksql-examples:latest" + hostname: ksql-datagen-users + depends_on: + - kafka + - schema-registry + # Note: The container's `run` script will perform the same readiness checks + # for Kafka and Confluent Schema Registry, but that's ok because they complete fast. + # The reason we check for readiness here is that we can insert a sleep time + # for topic creation before we start the application. + command: "bash -c 'echo Waiting for Kafka to be ready... && \ + cub kafka-ready -b kafka:29092 1 20 && \ + echo Waiting for Confluent Schema Registry to be ready... && \ + cub sr-ready schema-registry 8081 20 && \ + echo Waiting a few seconds for topic creation to finish... 
&& \ + sleep 2 && \ + java -jar /usr/share/java/ksql-examples/ksql-examples-0.1-SNAPSHOT-standalone.jar + quickstart=users format=json topic=users bootstrap-server=kafka:29092 maxInterval=100 iterations=1000 && \ + java -jar /usr/share/java/ksql-examples/ksql-examples-0.1-SNAPSHOT-standalone.jar + quickstart=users format=json topic=users bootstrap-server=kafka:29092 maxInterval=1000'" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + KSQL_LOG4J_OPTS: "-Dlog4j.configuration=file:/etc/ksql/log4j-rolling.properties" + STREAMS_BOOTSTRAP_SERVERS: kafka:29092 + STREAMS_SCHEMA_REGISTRY_HOST: schema-registry + STREAMS_SCHEMA_REGISTRY_PORT: 8081 + extra_hosts: + - "moby:127.0.0.1" + + # Runs the Kafka KSQL application + ksql-cli: + image: "confluentinc/ksql-cli:latest" + hostname: ksql-cli + depends_on: + - kafka + - schema-registry + - ksql-datagen-pageviews + - ksql-datagen-users + command: "perl -e 'while(1){ sleep 99999 }'" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + KSQL_LOG4J_OPTS: "-Dlog4j.configuration=file:/etc/ksql/log4j-rolling.properties" + STREAMS_BOOTSTRAP_SERVERS: kafka:29092 + STREAMS_SCHEMA_REGISTRY_HOST: schema-registry + STREAMS_SCHEMA_REGISTRY_PORT: 8081 + extra_hosts: + - "moby:127.0.0.1" diff --git a/quickstart/ksql-quickstart-schemas.jpg b/quickstart/ksql-quickstart-schemas.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5efdeceafc74c92c8b9aa4b7eebccf1c152cb6f4 GIT binary patch literal 59921 zcmeFZcUTn5);HSZoP$V~ASeQoGb1Wl7y-$`ND>BwAt!+`q69%vKtMrIK|nwx$06qo zf`DWJQIHwHVQ|uI+}nNLcc1V1&Ux?s{@*I0*m%JwQXj2~dHE0{j3J`~b}_3;-q+0)Jsk3h6&- zC;{Le9H9P_#sRz@Ca~ti(tlnlUr_$0piF#0^&3+d9g>ol7>;=b2m5Qv%lie%xws=- zJ>=XFzVhKN{_+ZPr{w`{NVva?n~z7ZsH=x3+)qbry8$I83U}8LvsE!WZRUT@!wYT{ z8R%gZX@1Eq(#K81T@0ctsvWKw?(6UC5$qxw?t9HINHbhV>{sQQAU@2N7Zd$O66~WR zW@mO$^c*74LsUghS?;tLSUS-Cs;1?6gFnlH-*m+OWHKx)OfF1G4iV@nub`o!A%9v? 
[GIT binary patch: 59921 bytes of encoded image data for quickstart/ksql-quickstart-schemas.jpg]
z(m0-OIK^^UnlEthuhM0f}xp0%F#?QL6+k(NOLe&gH~Fy zuB@7$q)9R;MX*8S4FZLn3eA=LEC8J#)7~Em}`4Yfle%#bmIb z5t)@HbgZiH45|2$*IbQLTV(v8%Zd2h-EW2~3cj-`S;nLTn88R4vnZ-nckEqeW zZZZ!z;vS*{(!Q$;J)_lyr+o`{Bgu=?M4PDc`82-IwpLG;COt4-S!4CVycOyGV!h($ z`7Fwy;ZW{UyRFh9$AlwgjJvb9hS=b)drVKpPF!7~|i&D4~3a&Q^C#1jo7&@a9b&!)k zdN5@7gO`qyfqk5-;o#+aw>EDq8#a6p%N{Lxp7Su+fN<`n&%&GNqs})*4jvHys!1FJ zUko%;WG#R>{Pc_vat0JYiQDJp8BGLL(5(qq>OBg20lN2G?OGYHDT`k*@sMrjbL{F& zfIcW(pTKcq4)N?uz4-}V#7+oJqUO~Q{`$_AhIbjASpwOnr{+9eW*N70!NbIjSjy%< z8Ra5>`llL^k0OXC485#*E9c!AW7KWVfGh5o4;1TJ5-uM-;`qqQ8_G|!`@FD9D!N&3 zY0cVt_aS0RzPGU*(-@a4Sb`4v&i2PWIQk2X{tu=zaB(TdV1JAXxXB$-@$ctCt&Vt$ z){o1~qPz8Wa@d~+F7uZQ_igSu9Q0$-*Z-LFJbypuM-H1VWzKHm*SYiy@>QZS7khLKd4RYAy(Z?q+Tmd8NJ$^NKey8Lo7gmH}E znLx!Wnw4G#I;-U=x0`F(PD#@cmG<0FW%{9iUx{g}f6>Ig3coJPhiqkF`dVZ%suipM zwK2p0_&Y6(3WDYfZPo;u4tF5Z)c`6YOx|SH07Aj(+XUo4OG+z2U~85S{mv%Jm_a(Q zvkb25(FCV=4bNpn&>WY4ai7N@E<^l|bOA0xKcoifv`jE~!@zs#A=(w)0Cjo8#J6`g z#RJ^VGtDHv*rYO*=IV9^NPG`5G_H`jyI zo6wXtsFINq*sSQZYjM2!r$^lR^6m?4lSf3lx-^VPc)mY3wM6^eeXJeT9SlXEub7&o zE0nOo2d+&}5v%AKuXAPiDJ0;H{`Q>%8`iVrun-5KYLho^AGAk8T`gHFqK-0GJY}Su zM+7C*=3{LHu!n&^nBbj3YyYZ?m6h*fZ-xr_oN|+l(^Iq1vEjIN^A4Nvi4bG?OFyE+ zk>ozyr!Dv|Q~1}dU|jl0@*2DYgWjdI>{fj)52-`WE-#GRm)z*#*`l8CbVHl($`OT) zMz`~KjN9=a!Jcx@V4z|g2}E~)`&{mvRk~&LyY0f|ph26O+8U$U#dm4a^W0ajh&HfF z!;$AEMwNZA`xnY>zospHsR#`I>~~f1M?YTYoqoI#IA_se*pF*g!8Y`oVEZ9{2N^C_?XpNw;lcY za;9IcM|!sNp0^yIW`8duC5t|Hd|wgk9AUvDn$Gp6@T~CV@rUPj`{e#hjL`oF<9c&` z>;!oRmq&CHF)nZnLT49ee=GXfcDLhp`%8{YT{&SjHLksPZmWZR6n3p2QLUZ>7Z40A zPdrStzTk7X!ZMw;n!5j;|EBNAUe{|GKPKyu4w$^eNd`t7EfN(!KWDXYnW#Y zIt)K8vAe#YLf-Lj=EfX#mOQAdr)@`2!M~zK3@q0`dW)3R?|9AHd`&tu+vDvVwsL>`>WgM#if!m&5g#OoT{Q+*<@h`Zo zAyo<}A+ORLK?R%)enYiLepp1;`z1>KpssFUK6^UISaE-2nR|eB=)L@GkfvO2;yx#F@7G^= z1+mMbBgLU{O3}lv!wwyki>w|CfFcwyP8Rn#+r22o?7!ZGjAb;ArVU#pTzXE=YLZpe zObylvuB7x(-D&Fd%dYuZNW*S{m5DEx6Kzwo(sVO#c5@^1lQiy%w#GUfzhlGk`@3kB zx$e9;-)Ey(SfJtR618&lNj}6wMipL{jCXMY2dBWHmrZrpA;EE%+SPC?h(OI@Ym$6 z$*{pU?i29g0{JsGypq1{g=9sizix!@>QR>dG%63e!rY^~&OwW(lOo?}Sb)IVXdB$X zOik!FwAO9*Q}UE(wsx`cNOe;1h;i~*dQkp29i$E&B*Zd5-4_|yt##6RglIcA(=yX1 zw~|#hd^(?3;ly!HWF{LQW-h(etPFAsCvF?jkzHZ$8(830K6-BPo_V&Z(a>1RhlR9` zoJGBX0o*)K?J6*6@uNW|46%X?NphX*c3DHp{QC7X+327YH-~e=D)!gg9?A0_2|3WH z`0(CVHvK%R{X5%Uv?~w6j`|E1i4tdx+VeDYi*h-P*=(Iy*)&~8Pj>tJd9KB;t~9DO z8l$hG_ORM^dds@VC`Y100)yKh!aF>H;s1(V(M<1UiKb-Zk$~uzKJ6$&hA8v7h64RE z3y|8SFM&Cj{1e9cZRv;=fzeXV_eF7z0RefQbc1HD91HV zQRO&n=6V z^Hhlj0dutLApG4YiOSwWM{5e7!YzbMgQS(hB@Tjv{pZ-GoKbX(1xUx2u#jmf*;Au+ z^bsN-zEIZf%?Bj(Ox1HM+B*?D>v@3inGYB#6OCt681^)MGJO)pI9yEp1e7aExaDYG zlU;h=RJ?22z}eavcf?u3<*coHJ-(nH_`tFZH*n}*0XMAjJhBt?acOFaAut`q02BFt7+fJJat&!Z`O%o#;h^JU>#Fm35OvA>iT(^aY>A@ z52Q|1tY9vghG20tlcKHaK%h~5aXS)`ox=0PG$`{|etuHVj-Uj8VJT;Jy zcgbd?mcdR0lwzwknFBp!z>2-<{2v+a&Y%BAG*UCZBMBY{Iq2Fg$U#t6&H2qGR0|<)Gpa!7XKzd$-CtL5O5;{AZJ9;J|}l zL>fR$VLvFY)JSsG<1s#TTb7WEW}Xy3qx$#{|JQ4uuRX)bdEX|+{sXng&KIE0nR)ExzF{I5#Jnf`2{sHTR8*KQzWZK@C`DIj> z7yQLv>nf@*JS5HTm>3Oet~!@y^g;_3>=n(ty{!EEp}3#cAaCXDAE)DUolq z_R#w7_S1Ez^2W?AMYV7p>pgjWz>Pv2?+>AXYAtQ}IPBoUhsMa;NIw4Y>drU3{of?- z8{RK&KK$v_-eWb_V;gnuqB&8bu0VhaSsu!wsW;rIozUDhA%+JvzI+Q^r;}5oN2VwC zpZH|)V%ia*cyv#F#V{% z`R5N-5lF@H=gZ%MMb?06$&I4h@j`Dr?483ImGbrwc%GWGvu5oPt5#SA;(qlx&=sPD ziDmpYF_(qa?Qcx?2smt}bcPZT`|h`and=EhV!XO*N@%8ZF4kA#9^^eoir^;^&E=em zT*-OFX+m^Eoy`02^mv6Y4aTNlgpc?X71Ibus0KZ0A3c*RCi|HVsk4I8%K-@Lja(z_ zw+cj38>GA`TI)FSCT($ibufjLR&XK&K5^2v>?~Sw za&%iIZcDOsz0yLm58=}8lqrz(+6bS5A8})x1%s+iS3?R>(D$pQ3vLIdMc?tUH|Ul# zvq)Hqn;6`MIAme5R(}lrfy3qIU47{(%%DkAC-OAdCsA11M?Yu(arc^0> 
zTkLH4=5P0gLCZnD6bH&)JpCj?FX^*w(MpkWd1c{nlk&IENoP;Pa|ln|sU%v|lx279->6LzFW4%8|Zy8!$F%x^9ux(TY6$7j;RuVFKZchGk1x)Fb=O8;8* zv7zIxYr_32ta(_1vR`>E4K|YrDVeTmMMs1KG1MXM`|;3z-MSNnu8Vg^1vJK;l*>3;UwP(8_y># zYL>f?}zcpNY;e#+z#CF_0S zd5ndiJ&BkPCJjZLbl)C*I)8f>+P$h|Z93t&r8~ihQ{|c?*JWzE1v4M-oW+bOrNanl zs#!0t`j0V4WqFuw*YtR*ze)j1M`ZdF27lW20qc$7i)i_S(bB4lenj+Poc8?T&SFobjT|+fj&u2Flbuvn} zLo_IpH+SA)-l5OucAUPQWq`NSs9MF)o?l%4e<{U-nf{xSy#@?0_CNmzm?+ztfOdZB z3;9p1{hvY+u#g&cS`S^?f0mS6uc=6P{G8lG)f+6V>Dq%!q1s!1oq>&wG#@@yE|gm1 zBHXxFpY6Wyb|}`d!20@p;LAg!He=dSA3yWv`A&q=gPi~jS9L?R8OJXL7yiko_HTcdc(7a#{H%Zenc|hc04<_l}#95K0pAzn#mGC zF9T3nHy<4P1+@VKyzoBAMZ(*vIIDIFu6d62&*2 zB0V8C(JZof{w2!HwlH$~U8qW&AFmMJr~1pqrnTc4aXjpk=t1)dt$Ygb?i8*OW*~a2 z_uQN0kboB$iXCmKmAKq1(NaMUXLB0KYTQ37o<(Q>Wt8RsX@RBtqPt$BXLP{fh&C#1 z$Fs6X&PiOOM(RkAq4cuD4;^$GLz^~_f7Kv zlnYjVC;3$ObH}XA9vOE;*L5(fpmTlD_2YABh9`ywj%pPyOp#oclRZHAqx;`Anp(Oo zB`=ovp)vN`?$XA&r^0l5Pz5&;po>-lJ{s)?-G%B7WDkec6DmR^>^v@ZoeufbC->~j z?pxYQD1FzQ#1kHIyxDj5cBM9>Z-JH;)WR@o%8rHj@A5oioOHe=d;L-RYFPjyc(rm0 z95oA4e;Fdk&o4PI^!Q^l(;$_8dNU}KQO#-D-hb`2e))D*jj127B}+% zU9iI%rj%RuXDkyr<#ct+d%i9?a~UbOMGDs?T4zTom{v&~|A_r{p8YhKIsAWg`8gm8 zMn_j?TH}i3Ah{&Om+hJE?*6)NQBEPpit_U4W1&LNB)yu1_UA>s-;s)NvlZ|Ip6Tbc z_3Ny0r#wrkN}6Tq8a$3G)*Do^YWgQDp{7y~Kg-glDlP>-(m@6Tkrk8*j@Fsi0L2xV z4g}^OeqQDSGMfybnAeP=Lx7JYMAmqB@ct0t{1aVtI}m-qs^cH?^kvO7;iSn*)^es8GLjf zRyUTMhZ02BHPaEGER+DDT(y~l{k2xA0OoZQ0h&6Ch+lDZN$MEm?+WpK{UX)bUny7$ z_~_4|`ku0S(B!A%I;6>T^v&SNe06*<6$iz9+|wdiEZNew+o4%H2F>E}d1rmSps>&8KT%759QJvdb5o7y1(QZUu z8~?u!9Z@N8RK@No9zT8Zk6w$|i}&AOd=jw5riOC^A!zjnef|c(F&6-T-VXWYW(!B$ zR7TUP=DvNCPxdvSdz7bzWis|MlNeHTEu>Um4Zpw1HGB=mE^%Q2sPC>0u8F3+=N)}v zrmS<#BKpn?={MH1$_UV81RGC9&~(`A@_pV-SVXL?%;OGV2m2a^J!Nhvn$*Q8R?l`? z0X6jBz0v-$ZjG;&er|d$#Y675#tEi~TVqUzrL|N?7RNvk$pqNt4O3lc1%-^07)#%v zQQ7{zRVC-X_C;Gxj0EUcw&=+u-9W@9v00gt3#hw#HlX2&L}4v;|8)ym$wd2kog|}R zW5olhnVE?2>fB_~%ev`gDel-~=MGK%etz}m=>>pCPT~(8fISE;kAMy!`D=16r&?|f zK^0-#(Ee3FUvqO9X5NW+!wglUx~}@m?wWus{YK(BWVe_Fnl52)AB+-04`@#(vOqI; z%u4bmUdZ3%0%bPD2eF3+m=`yZ(U<*#Z~3ChUqgg`KWACn@^;#I-;~uC*e@^Y*IA`B zoHHq-v@Ev~Y-V32DfKlgC#}H4N|!h@h%JUJS>>{t2^EpwL?Qg`6PFoARO?qUcKoIA z%W~qkEPQRU`OdBXd0IeWZAxBEJcQgqMS$7j13Z%TXtN;r@KD1Tr+1fM$Yw@1(*=j> zrnXo?TUF)YDU=$62C#p-GJ77A1ci0f8sCrB2RfV5TZo;fvk`_8f1mwy!rD=f-gJHZbH{2VgXG6 z0j{w-N`k+MR$c$`BiwpSrQlkfolWBJ?ozJ|gB<}I*Ez^W*FU~9@qR7zJy75^h3?V& z?~W9IAEA_jthbn8|N4)|`8O#)fS`YRz8vO5ve)hA+G%UC5SKqc=c7`+4`)mx;o*F)geWywE&s+ zbwOyw=cBKWzC?GrH}CRR_a%(GlvAQ^^cUXamvStHGN3ongm~jJ`yf4LUbVS=cwd?$ z|H4RfR`anby(!J5yeFd3jnYE%Lh}wU;pfiyH@siK<&=-L`YleUx6Eg+RIK`bTj}Uo zI5p8BwulvDaM1buGwo;z1;aWNc^xt}V?g9hbeV*rj>MBQ60%PUB(&e9+&;=}%uTTG?EVcZD z&qw?TgNSJ}&jCEBN_J)XX4LFrrl(9)EHUW^XwjI@2ee_7v?m~jQ!IpZvZwIyzMDsm z#RZ2PO=@NjP!rxQo(6E5-E++R>Y&N0QM++$libCb^@gj2TAZt2a;R2MK#qOGk;b#1 z4ovf2o{5TU>-sV(aQ?lKabdn*1|A1%R-Jo3YhapDpqT<+DaQvH&Y<1`({GdHD3NPsn2gnhnkmYT@C1k_q?@<@%OFqLaU$heC%IM;M%5=m{sj)rc_)8PHD8# zg9i4wHN8bK{;fR}hkEKP6XX4yb!v!l1tM52mUAV{p{LA9(_=P<{jaoJsvUnk;d>-$ z{z&*(vP#rBh%G~pwor498KI#=874(YpAsPqWg5gLrAz{L2sV^zqU}HokTp%;OqXBgJrHW8QAx}hN@ROPcd z{;Xzl)+6P|ds5f+pF2~gXTY1Fk%`vM!n)=iiZ{3Z-3&5<>8#sZLvN{fZP#+$aJex<1?k)v@NS~@cEo=w5^MB ztB}c3b1632s;pwzd=WtM&;PCd^8dZ|vg`lKxH63xdT7T9qscTi(T%)17w-+1SPOGI zsi1tSiqol4wXsnoP?P4t%utpe#?~gG6e+;wCmR0gDUJQY+oWsdrM<^0<1OgQ39k}K z*88}OB+l!<4Y+^MO47HM`5Ns6d7^cimYxNY`4Z16_Ar#hyh4*$o8WF8ME+@6Q%Didk@fxH2L!^=r%I z1U^zr6yFrmmd&J2|m+0`dcjHFzyer_`KX7<6pE9Kl@1~k_0`p-MS=w}&X%n~2s zeH2F@gM&iosZWDcb$1&<4ba>}W6D{3hTU%kNSvASX_gjl`t#JB5hrN15I&9@0kZr zdmdSi1rfH_hJynY$MBwua&Tj2h=x@zEsk;f!^bzN>Gf#TyVJ)P7StD&lW&NSu3jE5 z@2Z>qz2*%T^!uE7JQj2 
zr{?4zV|lXQG1D}Tk*g*IQbhH69(yx+Y(8__E(Q;Jjdu)99i-3(r`{*kff ze9IgHVss$0B$RkhYn++2qErPNTAa8}=rVbS;aEH+6TebU*SD}*Jz=Q8cHmRopSwjE z{WQ4)y})4v!&mmHPCwf}mNY4hS&oa^n4LXf4~u3UWQ~E=BV|Rnhc!wNZsck5k))Gn z@^55ck(Yln8UE-MLKdF&tRT_v4; z0y3j>d@%dPZ4C5g42DjxTob&hFMOgB-kp-|P*nEpoY`U@(L`M`=C#l9+>5C!;d`n* z85Il2bN|&vvO4^s)wTzJ!-RjHC5ab#gM;U~-XRhT4flQc5VPHW>$&dxYrJZ5rFSYn z;r7t9i*_1)pbj#W$eZ<=^5edb&~BvR3oZxRw!b}Wl|7|kM)J|DdV9yKR%6^7@+adi z8QZjqfxOeLA|Vd?YYktaB^4FDkFTdDQQP+ir(BEB8NR!DfW6)0;L{n;T*TPK2Zb@F z$m;|GqG+{>u)WQ|bw*We`bThQ8nnaU-Zcbos4D72#WIP67$nvQwhuhnaH|n;@b@z; zOD6sqbtWH?F*@9Igu}mk_)@LGd7YCFbtNm>3~tv_f`kYKw%6>oj3aRqXf6lq zNP19Yqh(bxJW*c#`m?P|b!J}{@Ur}a)iMP;1@QtO2bP*h_(!lq?#LIAT3-k8eh-b$ zPaD6p-7pO3)E7M~p!-!=C@Shv=fGpsCm1J66?upjGyx4k3o1>Q_iO}vx*=8llPuSZ z`cfMwPhJYubIn{AY&ShK(DwPDLd4>;!wMD?Wi_wu%HvC-w^y=i&}VE6lcS}FN(Xd& zGAo*M*9Ouziw>Kb{ahaQbX+#oFo;NUo)^bU-_eo&yxM zX1f=VafN;~j)HzhVL*-Wlvvn~Ti*YXTpGYeR18gluEiQfm*z6$Ru7?p_8ft(<6vT0 zAprri!5D3EKStmMt@bo;G}_b8>*Wztf0vbU=Hq9vXCvZDpfbRwZ4%JcVa&r&Mg|

diff --git a/quickstart/quickstart-docker.rst b/quickstart/quickstart-docker.rst
new file mode 100644
index 000000000000..e9d84ca164f5
--- /dev/null
+++ b/quickstart/quickstart-docker.rst
@@ -0,0 +1,129 @@
.. _ksql_quickstart:


Docker Setup for KSQL
=====================

**Table of Contents**

.. contents::
   :local:


This part of the quickstart guides you through the steps to set up a Kafka cluster and start KSQL in a Docker environment. Once you complete these steps, you can start using KSQL to query the Kafka cluster.


Start a Kafka cluster
---------------------

Do not run KSQL against a production cluster, since KSQL is in developer preview.

As a prerequisite, install `Docker for Mac `__. If you are not on macOS, you can install Docker on another `platform `__.


1. Clone the Confluent KSQL repository:

.. sourcecode:: bash

   $ git clone https://github.com/confluentinc/ksql

2. Change into the quickstart directory:

.. sourcecode:: bash

   $ cd ksql/quickstart

3. Launch the KSQL quickstart in Docker:

.. sourcecode:: bash

   $ docker-compose up -d


The next three steps are optional verification steps to ensure your environment is properly set up.

4. Verify that six Docker containers were created:

.. sourcecode:: bash

   $ docker-compose ps

   Name                                 Command                        State  Ports
   -------------------------------------------------------------------------------------------------------------------------
   quickstart_kafka_1                   /etc/confluent/docker/run      Up     0.0.0.0:29092->29092/tcp, 0.0.0.0:9092->9092/tcp
   quickstart_ksql-cli_1                perl -e while(1){ sleep 99 ... Up
   quickstart_ksql-datagen-pageviews_1  bash -c echo Waiting for K ... Up
   quickstart_ksql-datagen-users_1      bash -c echo Waiting for K ... Up
   quickstart_schema-registry_1         /etc/confluent/docker/run      Up     0.0.0.0:8081->8081/tcp
   quickstart_zookeeper_1               /etc/confluent/docker/run      Up     2181/tcp, 2888/tcp, 0.0.0.0:32181->32181/tcp, 3888/tcp


5. The docker-compose file already runs a data generator that pre-populates two Kafka topics, ``pageviews`` and ``users``, with mock data. Verify that the data generator created these two topics:

.. sourcecode:: bash

   $ docker-compose exec kafka kafka-topics --zookeeper zookeeper:32181 --list
   _confluent-metrics
   _schemas
   pageviews
   users

6. Use the ``kafka-console-consumer`` to view a few messages from each topic. The topic ``pageviews`` has a key that is a mock timestamp and a value that is in ``DELIMITED`` format. The topic ``users`` has a key that is the user id and a value that is in ``JSON`` format.

.. sourcecode:: bash

   $ docker-compose exec zookeeper kafka-console-consumer --topic pageviews --bootstrap-server kafka:29092 --from-beginning --max-messages 3 --property print.key=true
   1491040409254    1491040409254,User_5,Page_70
   1488611895904    1488611895904,User_8,Page_76
   1504052725192    1504052725192,User_8,Page_92
   ...

   $ docker-compose exec zookeeper kafka-console-consumer --topic users --bootstrap-server kafka:29092 --from-beginning --max-messages 3 --property print.key=true
   User_2   {"registertime":1509789307038,"gender":"FEMALE","regionid":"Region_1","userid":"User_2"}
   User_6   {"registertime":1498248577697,"gender":"OTHER","regionid":"Region_8","userid":"User_6"}
   User_8   {"registertime":1494834474504,"gender":"MALE","regionid":"Region_5","userid":"User_8"}
   ...
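Optionally, you can also inspect a topic's configuration, such as its partition count. This extra check is not part of the quickstart itself; it uses the standard ``--describe`` flag of ``kafka-topics``, and the exact output depends on your Kafka version.

.. sourcecode:: bash

   $ docker-compose exec kafka kafka-topics --zookeeper zookeeper:32181 --describe --topic pageviews
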
Start KSQL
----------

1. From the host machine, start the KSQL CLI inside the ``ksql-cli`` container:

.. sourcecode:: bash

   $ docker-compose exec ksql-cli ksql-cli local --bootstrap-server kafka:29092
   ...
   ksql>

2. Return to the `main KSQL quickstart `__ to start querying the data in the Kafka cluster.


Produce more topic data
-----------------------

The docker-compose file automatically runs a data generator that continuously produces data to two Kafka topics, ``pageviews`` and ``users``. No further action is required if you want to use just this data. You can return to the `main KSQL quickstart `__ to start querying the data in these two topics.

However, if you want to produce additional data, you can use any of the following methods.

* Produce Kafka data with the Kafka command line tool ``kafka-console-producer``. The following example produces data with a value in DELIMITED format:

.. sourcecode:: bash

   $ docker-compose exec kafka kafka-console-producer --topic t1 --broker-list kafka:29092 --property parse.key=true --property key.separator=:
   key1:v1,v2,v3
   key2:v4,v5,v6
   key3:v7,v8,v9
   key1:v10,v11,v12

* The following example produces data with a value in JSON format:

.. sourcecode:: bash

   $ docker-compose exec kafka kafka-console-producer --topic t2 --broker-list kafka:29092 --property parse.key=true --property key.separator=:
   key1:{"id":"key1","col1":"v1","col2":"v2","col3":"v3"}
   key2:{"id":"key2","col1":"v4","col2":"v5","col3":"v6"}
   key3:{"id":"key3","col1":"v7","col2":"v8","col3":"v9"}
   key1:{"id":"key1","col1":"v10","col2":"v11","col3":"v12"}

* Advanced Docker users can run the data generator with different options by editing the Docker Compose file and modifying how the containers ``ksql-datagen-users`` and ``ksql-datagen-pageviews`` invoke the data generator.
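If you produced messages with either of the commands above, you can consume them back to confirm they were written. This verification step is an addition to the steps above; it reuses the ``t1`` topic from the first example, so adjust the topic name as needed.

.. sourcecode:: bash

   $ docker-compose exec kafka kafka-console-consumer --topic t1 --bootstrap-server kafka:29092 --from-beginning --max-messages 4 --property print.key=true
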
diff --git a/quickstart/quickstart-non-docker.rst b/quickstart/quickstart-non-docker.rst
new file mode 100644
index 000000000000..3aa727c47e73
--- /dev/null
+++ b/quickstart/quickstart-non-docker.rst
@@ -0,0 +1,130 @@
.. _ksql_quickstart:


Non-Docker Setup for KSQL
=========================

**Table of Contents**

.. contents::
   :local:


This part of the quickstart guides you through the steps to set up a Kafka cluster and start KSQL in a non-Docker environment. Once you complete these steps, you can start using KSQL to query the Kafka cluster.


Start a Kafka cluster
---------------------

Do not run KSQL against a production cluster, since KSQL is in developer preview.

You will need to download and install a Kafka cluster on your local machine. This cluster consists of a single Kafka broker, a single-node ZooKeeper ensemble, and an optional single Schema Registry instance.

1. Install the Oracle Java JRE or JDK >= 1.7 on your local machine.

2. Download and install Confluent Platform 3.3.0, which includes a Kafka broker, ZooKeeper, Schema Registry, REST Proxy, and Kafka Connect. `Install `__ Confluent Platform directly onto a Linux server.

3. If you installed Confluent Platform via tar or zip, change into the installation directory. The paths and commands used throughout this quickstart assume that you are in this installation directory:

.. sourcecode:: bash

   $ cd confluent-3.3.0/

4. Start Confluent Platform using the new Confluent CLI (part of the free Confluent Open Source distribution). ZooKeeper listens on ``localhost:2181``, the Kafka broker on ``localhost:9092``, and Confluent Schema Registry on ``localhost:8081``.

.. sourcecode:: bash

   $ ./bin/confluent start
   Starting zookeeper
   zookeeper is [UP]
   Starting kafka
   kafka is [UP]
   Starting schema-registry
   schema-registry is [UP]
   Starting kafka-rest
   kafka-rest is [UP]
   Starting connect
   connect is [UP]
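If you later want to re-check that the services are still running, the same Confluent CLI provides a ``status`` subcommand. This optional check is not part of the original steps; its output resembles the start output above.

.. sourcecode:: bash

   # Re-check the state of the locally running Confluent Platform services
   $ ./bin/confluent status
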
Start KSQL
----------

1. Clone the Confluent KSQL repository:

.. sourcecode:: bash

   $ git clone https://github.com/confluentinc/ksql

2. Change into the KSQL directory:

.. sourcecode:: bash

   $ cd ksql

3. Compile the code:

.. sourcecode:: bash

   $ mvn clean install

4. Start KSQL. Use the ``local`` argument for the developer preview KSQL release; it starts the KSQL engine locally.

.. sourcecode:: bash

   $ ./bin/ksql-cli local
   ...
   ksql>

5. Refer to the steps below to generate data to the Kafka cluster.


Produce topic data
------------------

The `main KSQL quickstart page `__ assumes you have run at least the following three steps to produce data to two Kafka topics, ``pageviews`` and ``users``, in your Kafka cluster. If you are not using Docker, which generates this data automatically, you have to perform these steps manually.

1. Assuming you have already completed the steps above to compile the KSQL code, verify that you have a compiled ``jar`` file ``ksql-examples/target/ksql-examples-0.1-SNAPSHOT-standalone.jar`` for data generation:

.. sourcecode:: bash

   $ ls ksql-examples/target/ksql-examples-0.1-SNAPSHOT-standalone.jar

2. Produce Kafka data to a topic ``pageviews`` using the provided data generator. The following example continuously generates data with a value in DELIMITED format:

.. sourcecode:: bash

   $ java -jar ksql-examples/target/ksql-examples-0.1-SNAPSHOT-standalone.jar \
       quickstart=pageviews format=delimited topic=pageviews maxInterval=10000

3. Produce Kafka data to a topic ``users`` using the provided data generator. The following example continuously generates data with a value in JSON format:

.. sourcecode:: bash

   $ java -jar ksql-examples/target/ksql-examples-0.1-SNAPSHOT-standalone.jar \
       quickstart=users format=json topic=users maxInterval=10000

At this point you may return to the `main KSQL quickstart page `__ to start querying the Kafka cluster. If you would like to do additional testing with topic data produced from the command line tools:

4. You can produce Kafka data with the Kafka command line tool ``kafka-console-producer``. The following example produces data with a value in DELIMITED format:

.. sourcecode:: bash

   $ kafka-console-producer --topic t1 --broker-list localhost:9092 --property parse.key=true --property key.separator=:
   key1:v1,v2,v3
   key2:v4,v5,v6
   key3:v7,v8,v9
   key1:v10,v11,v12

5. The following example produces data with a value in JSON format:

.. sourcecode:: bash

   $ kafka-console-producer --topic t2 --broker-list localhost:9092 --property parse.key=true --property key.separator=:
   key1:{"id":"key1","col1":"v1","col2":"v2","col3":"v3"}
   key2:{"id":"key2","col1":"v4","col2":"v5","col3":"v6"}
   key3:{"id":"key3","col1":"v7","col2":"v8","col3":"v9"}
   key1:{"id":"key1","col1":"v10","col2":"v11","col3":"v12"}
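As with the Docker setup, you can consume the messages back to confirm they were written. This verification step is an addition to the steps above; it reuses the ``t1`` topic from step 4, so adjust the topic name as needed.

.. sourcecode:: bash

   $ kafka-console-consumer --topic t1 --bootstrap-server localhost:9092 --from-beginning --max-messages 4 --property print.key=true
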
diff --git a/quickstart/quickstart.rst b/quickstart/quickstart.rst
new file mode 100644
index 000000000000..278db5e07217
--- /dev/null
+++ b/quickstart/quickstart.rst
@@ -0,0 +1,209 @@
.. _ksql_quickstart:

KSQL Quickstart
===============

**Table of Contents**

.. contents::
   :local:


Welcome to the quickstart guide for KSQL!

The goal of this quickstart guide is to demonstrate a simple workflow using KSQL to write streaming queries against data in Kafka.


Setup
-----

Because KSQL queries data in a Kafka cluster, you will need to bring up a Kafka cluster, including ZooKeeper and a Kafka broker. Do not run KSQL against a production Kafka cluster while KSQL is in developer preview.

1. Bring up a Kafka cluster and start KSQL.

* `Follow these instructions if you are using Docker `__ (we recommend Docker for simplicity)
* `Follow these instructions if you are not using Docker `__

2. After you have successfully started the Kafka cluster and started KSQL, you will see the KSQL prompt:

.. sourcecode:: bash

   ksql>

3. KSQL provides a structured query language to query Kafka data, so you need some data to query. For this quickstart, you will produce mock streams to the Kafka cluster.

* If you are using our Docker Compose files, a Docker container is already running a data generator that continuously produces Kafka messages to the Kafka cluster. No further action is required.
* If you are not using our Docker environment, follow these `instructions `__ to generate data to the Kafka cluster.


Create a STREAM and TABLE
-------------------------

This KSQL quickstart shows examples querying data from Kafka topics called ``pageviews`` and ``users`` using the following schemas:

.. image:: https://github.com/confluentinc/ksql/blob/master/quickstart/ksql-quickstart-schemas.jpg
   :width: 200px

Before proceeding, please check:

* In the terminal window where you started KSQL, you see the ``ksql>`` prompt
* If you are not using Docker, you must have manually run the data generator to produce topics called ``pageviews`` and ``users``. If you haven't done this, please follow these `instructions `__ to generate the data. (The Docker Compose file runs the data generator automatically.)


1. Create a STREAM ``pageviews_original`` from the Kafka topic ``pageviews``, specifying the ``value_format`` of ``DELIMITED``. Then describe the new STREAM. Notice that KSQL created two additional columns: ``ROWTIME``, which corresponds to the Kafka message timestamp, and ``ROWKEY``, which corresponds to the Kafka message key.

.. sourcecode:: bash

   ksql> CREATE STREAM pageviews_original (viewtime bigint, userid varchar, pageid varchar) WITH (kafka_topic='pageviews', value_format='DELIMITED');

   ksql> DESCRIBE pageviews_original;

    Field    | Type
   ----------------------------
    ROWTIME  | BIGINT
    ROWKEY   | VARCHAR(STRING)
    VIEWTIME | BIGINT
    USERID   | VARCHAR(STRING)
    PAGEID   | VARCHAR(STRING)

2. Create a TABLE ``users_original`` from the Kafka topic ``users``, specifying the ``value_format`` of ``JSON``. Then describe the new TABLE.

.. sourcecode:: bash

   ksql> CREATE TABLE users_original (registertime bigint, gender varchar, regionid varchar, userid varchar) WITH (kafka_topic='users', value_format='JSON');

   ksql> DESCRIBE users_original;

    Field        | Type
   --------------------------------
    ROWTIME      | BIGINT
    ROWKEY       | VARCHAR(STRING)
    REGISTERTIME | BIGINT
    GENDER       | VARCHAR(STRING)
    REGIONID     | VARCHAR(STRING)
    USERID       | VARCHAR(STRING)

3. Show all STREAMS and TABLES:

.. sourcecode:: bash

   ksql> SHOW STREAMS;

    Stream Name        | Kafka Topic | Format
   -----------------------------------------------------------------
    PAGEVIEWS_ORIGINAL | pageviews   | DELIMITED

   ksql> SHOW TABLES;

    Table Name     | Kafka Topic | Format | Windowed
   --------------------------------------------------------------
    USERS_ORIGINAL | users       | JSON   | false
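You can also ask KSQL which Kafka topics it can see. The ``SHOW TOPICS;`` statement below is an assumption based on the pattern of the other ``SHOW`` commands; its availability and output format in the developer preview may differ.

.. sourcecode:: bash

   ksql> SHOW TOPICS;
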
Write Queries
-------------

1. Use ``SELECT`` to create a query that returns data from a STREAM. To stop viewing the data, press ``<ctrl-c>``. You may optionally include the ``LIMIT`` keyword to limit the number of rows returned in the query result. Note that the exact output may vary because the input data is randomly generated.

.. sourcecode:: bash

   ksql> SELECT pageid FROM pageviews_original LIMIT 3;
   Page_24
   Page_73
   Page_78
   LIMIT reached for the partition.
   Query terminated
   ksql>

2. Create a persistent query by using the ``CREATE STREAM`` keywords to precede the ``SELECT`` statement. Unlike the non-persistent query above, results from this query are written to a Kafka topic ``PAGEVIEWS_FEMALE``. The query below enriches the ``pageviews`` STREAM by doing a ``LEFT JOIN`` with the ``users_original`` TABLE on the user ID, keeping only rows where the gender is ``FEMALE``.

.. sourcecode:: bash

   ksql> CREATE STREAM pageviews_female AS SELECT users_original.userid AS userid, pageid, regionid, gender FROM pageviews_original LEFT JOIN users_original ON pageviews_original.userid = users_original.userid WHERE gender = 'FEMALE';

   ksql> DESCRIBE pageviews_female;

    Field    | Type
   ----------------------------
    ROWTIME  | BIGINT
    ROWKEY   | VARCHAR(STRING)
    USERID   | VARCHAR(STRING)
    PAGEID   | VARCHAR(STRING)
    REGIONID | VARCHAR(STRING)
    GENDER   | VARCHAR(STRING)

3. Use ``SELECT`` to view query results as they come in. To stop viewing the query results, press ``<ctrl-c>``. This stops printing to the console, but it does not terminate the actual query: the query continues to run in the underlying Kafka Streams application.

.. sourcecode:: bash

   ksql> SELECT * FROM pageviews_female;
   1502477856762 | User_2 | User_2 | Page_55 | Region_9 | FEMALE
   1502477857946 | User_5 | User_5 | Page_14 | Region_2 | FEMALE
   1502477858436 | User_3 | User_3 | Page_60 | Region_3 | FEMALE
   ^CQuery terminated
   ksql>

4. Create a new persistent query that uses ``LIKE`` to match only region IDs ending in ``8`` or ``9``. Results from this query are written to a Kafka topic called ``pageviews_enriched_r8_r9``.

.. sourcecode:: bash

   ksql> CREATE STREAM pageviews_female_like_89 WITH (kafka_topic='pageviews_enriched_r8_r9', value_format='DELIMITED') AS SELECT * FROM pageviews_female WHERE regionid LIKE '%_8' OR regionid LIKE '%_9';

5. Create a new persistent query that counts the pageviews for each region and gender combination in a `tumbling window `__ of 30 seconds when the count is greater than 1. Results from this query are written to a Kafka topic called ``PAGEVIEWS_REGIONS``.

.. sourcecode:: bash

   ksql> CREATE TABLE pageviews_regions AS SELECT gender, regionid, COUNT(*) AS numusers FROM pageviews_female WINDOW TUMBLING (size 30 second) GROUP BY gender, regionid HAVING COUNT(*) > 1;

   ksql> DESCRIBE pageviews_regions;

    Field    | Type
   ----------------------------
    ROWTIME  | BIGINT
    ROWKEY   | VARCHAR(STRING)
    GENDER   | VARCHAR(STRING)
    REGIONID | VARCHAR(STRING)
    NUMUSERS | BIGINT

6. Use ``SELECT`` to view results from the above query:

.. sourcecode:: bash

   ksql> SELECT regionid, numusers FROM pageviews_regions LIMIT 5;
   Region_3 | 4
   Region_3 | 5
   Region_6 | 5
   Region_6 | 6
   Region_3 | 8
   LIMIT reached for the partition.
   Query terminated
   ksql>

7. Show all queries:

.. sourcecode:: bash

   ksql> SHOW QUERIES;

    Query ID | Kafka Topic              | Query String
   ---------------------------------------------------------------------------------------------------------------------------
    1        | PAGEVIEWS_FEMALE         | CREATE STREAM pageviews_female AS SELECT users_original.userid AS userid, pageid, regionid, gender FROM pageviews_original LEFT JOIN users_original ON pageviews_original.userid = users_original.userid WHERE gender = 'FEMALE';
    2        | pageviews_enriched_r8_r9 | CREATE STREAM pageviews_female_like_89 WITH (kafka_topic='pageviews_enriched_r8_r9', value_format='DELIMITED') AS SELECT * FROM pageviews_female WHERE regionid LIKE '%_8' OR regionid LIKE '%_9';
    3        | PAGEVIEWS_REGIONS        | CREATE TABLE pageviews_regions AS SELECT gender, regionid, COUNT(*) AS numusers FROM pageviews_female WINDOW TUMBLING (size 30 second) GROUP BY gender, regionid HAVING COUNT(*) > 1;


Terminate and Exit
------------------

1. Until you terminate a query, it will run continuously as a Kafka Streams application. From the output of ``SHOW QUERIES;``, identify the ID of the query you would like to terminate. For example, to terminate query ID ``2``:

.. sourcecode:: bash

   ksql> terminate 2;

2. To exit the KSQL application, type ``exit`` at the ``ksql>`` prompt:

.. sourcecode:: bash

   ksql> exit
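When you are done with the quickstart, you may also want to shut down the rest of the environment. The teardown commands below are not part of the original steps, but both are standard for the two setups described earlier.

.. sourcecode:: bash

   # Docker setup: stop and remove the quickstart containers (run from ksql/quickstart)
   $ docker-compose down

   # Non-Docker setup: stop the Confluent Platform services started with 'confluent start'
   $ ./bin/confluent stop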