diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..0be1a43d1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +/target/ + +# Eclipse and Maven +.classpath +.project +.settings +target +.cache + +# Intellij +.idea +# Intellij recommends to share iml files, however, better don't share files which might be outdated +*.iml + diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..d4b54c04f --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 EXASOL AG + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index de10dc84b..cdf163c33 100644 --- a/README.md +++ b/README.md @@ -1 +1,25 @@ -# virtual-schemas \ No newline at end of file +# Virtual Schemas + +Virtual schemas provide a powerful abstraction to conveniently access arbitrary data sources. Virtual schemas are a kind of read-only link to an external source and contain virtual tables which look like regular tables except that the actual data are not stored locally. + +After creating a virtual schema, its included tables can be used in SQL queries and even combined with persistent tables stored directly in EXASOL, or with other virtual tables from other virtual schemas. The SQL optimizer internally translates the virtual objects into connections to the underlying systems and implicitly transfers the necessary data. SQL conditions are tried to be pushed down to the data sources to ensure minimal data transfer and optimal performance. + +That's why this concept creates a kind of logical view on top of several data sources which could be databases or other data services. By that, you can either implement a harmonized access layer for your reporting tools. Or you can use this technology for agile and flexible ETL processing, since you don't need to change anything in EXASOL if you change or extend the objects in the underlying system. + +Please note that virtual schemas are part of the Advanced Edition of EXASOL. + +For further details about the concept, usage and examples, please see the corresponding chapter in our EXASOL User Manual. + + +## API Specification + +The subdirectory [doc](doc) contains the API specification for virtual schema adapters. + + +## JDBC Adapter + +The subdirectory [jdbc-adapter](jdbc-adapter) contains the JDBC adapter which allows to integrate any kind of JDBC data source which provides a Linux JDBC driver. 
+ +## Python Redis Demo Adapter + +The subdirectory [python-redis-demo-adapter](python-redis-demo-adapter) contains a demo adapter for Redis writting in Python. This adapter was created to easily demonstrate the key concepts in a real, but very simple implementation. If you want to write your own adapter, this might be the right code to get a first impression what you'll have to develop. diff --git a/doc/README.md b/doc/README.md new file mode 100644 index 000000000..22fe20196 --- /dev/null +++ b/doc/README.md @@ -0,0 +1,1093 @@ +# Virtual Schema API Documentation + +## Table of Contents +- [Introduction](#introduction) +- [Requests and Responses](#requests-and-responses) + - [Create Virtual Schema](#create-virtual-schema) + - [Refresh](#refresh) + - [Set Properties](#set-properties) + - [Drop Virtual Schema](#drop-virtual-schema) + - [Get Capabilities](#get-capabilities) + - [Pushdown](#pushdown) +- [Embedded Commonly Used Json Elements](#embedded-commonly-used-json-elements) + - [Schema Metadata Info](#schema-metadata-info) + - [Schema Metadata](#schema-metadata) +- [Expressions](#expressions) + - [Table](#table) + - [Column Lookup](#column-lookup) + - [Literal](#literal) + - [Predicates](#predicates) + - [Scalar Functions](#scalar-functions) + - [Aggregate Functions](#aggregate-functions) + +## Introduction + +There are the following request and response types: + +| Type | Called ... | +| :-------------------------- | :------------- | +| **Create Virtual Schema** | ... for each ```CREATE VIRTUAL SCHEMA ...``` statement | +| **Refresh** | ... for each ```ALTER VIRTUAL SCHEMA ... REFRESH ...``` statement. | +| **Set Properties** | ... for each ```ALTER VIRTUAL SCHEMA ... SET ...``` statement. | +| **Drop Virtual Schema** | ... for each ```DROP VIRTUAL SCHEMA ...``` statement. | +| **Get Capabilities** | ... whenever a virtual table is queried in a ```SELECT``` statement. | +| **Pushdown** | ... whenever a virtual table is queried in a ```SELECT``` statement. | + +We describe each of the types in the following sections. + +**Please note:** To keep the documentation concise we defined the elements which are commonly in separate sections below, e.g. ```schemaMetadataInfo``` and ```schemaMetadata```. + +## Requests and Responses + +### Create Virtual Schema + +Informs the Adapter about the request to create a Virtual Schema, and asks the Adapter for the metadata (tables and columns). + +The Adapter is allowed to throw an Exception if the user missed to provide mandatory properties or in case of any other problems (e.g. connectivity). + +**Request:** + +```json +{ + "type": "createVirtualSchema", + "schemaMetadataInfo": { + ... + } +} +``` + +**Response:** + +```json +{ + "type": "createVirtualSchema", + "schemaMetadata": { + ... + } +} +``` + +Notes +* ```schemaMetadata``` is mandatory. However, it is allowed to contain no tables. + + +### Refresh + +Request to refresh the metadata for the whole Virtual Schema, or for specified tables. + +**Request:** + +```json +{ + "type": "refresh", + "schemaMetadataInfo": { + ... + }, + "requestedTables": ["T1", "T2"] +} +``` + +Notes +* ```requestedTables``` is optional. If existing, only the specified tables shall be refreshed. The specified tables do not have to exist, it just tell Adapter to update these tables (which might be changed, deleted, added, or non-existing). + +**Response:** + +```json +{ + "type": "refresh", + "schemaMetadata": { + ... + }, + "requestedTables": ["T1", "T2"] +} +``` + +Notes +* ```schemaMetadata``` is optional. 
It can be skipped if the adapter does not want to refresh (e.g. because it detected that there is no change).
+* ```requestedTables``` must exist if and only if the element existed in the request. The values must be the same as in the request (to make sure that the Adapter only refreshed these tables).
+
+### Set Properties
+
+Request to set properties. The Adapter can decide whether it needs to send back new metadata. The Adapter is allowed to throw an Exception if the user provided invalid properties or in case of any other problems (e.g. connectivity).
+
+**Request:**
+
+```json
+{
+    "type": "setProperties",
+    "schemaMetadataInfo": {
+        ...
+    },
+    "properties": {
+        "JDBC_CONNECTION_STRING": "new-jdbc-connection-string",
+        "NEW_PROPERTY": "value of a not yet existing property",
+        "DELETED_PROPERTY": null
+    }
+}
+```
+
+**Response:**
+
+```json
+{
+    "type": "setProperties",
+    "schemaMetadata": {
+        ...
+    }
+}
+```
+
+Notes
+* Request: A property set to null means that this property was asked to be deleted. Properties set to null might also not have existed before.
+* Response: ```schemaMetadata``` is optional. It only exists if the adapter wants to send back new metadata. The existing metadata are overwritten completely.
+
+
+### Drop Virtual Schema
+
+Informs the Adapter that a Virtual Schema is about to be dropped. The Adapter can update external dependencies if it has any. The Adapter is not expected to throw an exception, and if it does, it will be ignored.
+
+**Request:**
+
+```json
+{
+    "type": "dropVirtualSchema",
+    "schemaMetadataInfo": {
+        ...
+    }
+}
+```
+
+**Response:**
+
+```json
+{
+    "type": "dropVirtualSchema"
+}
+```
+
+
+### Get Capabilities
+
+Requests the list of capabilities supported by the Adapter. Based on these capabilities, the database will collect everything that can be pushed down in the current query and send a pushdown request afterwards.
+
+**Request:**
+
+```json
+{
+    "type": "getCapabilities",
+    "schemaMetadataInfo": {
+        ...
+    }
+}
+```
+
+**Response:**
+
+```json
+{
+    "type": "getCapabilities",
+    "capabilities": [
+        "ORDER_BY_COLUMN",
+        "AGGREGATE_SINGLE_GROUP",
+        "LIMIT",
+        "AGGREGATE_GROUP_BY_TUPLE",
+        "FILTER_EXPRESSIONS",
+        "SELECTLIST_EXPRESSIONS",
+        "SELECTLIST_PROJECTION",
+        "AGGREGATE_HAVING",
+        "ORDER_BY_EXPRESSION",
+        "AGGREGATE_GROUP_BY_EXPRESSION",
+        "LIMIT_WITH_OFFSET",
+        "AGGREGATE_GROUP_BY_COLUMN",
+        "FN_PRED_LESSEQUALS",
+        "FN_AGG_COUNT",
+        "LITERAL_EXACTNUMERIC",
+        "LITERAL_DATE",
+        "LITERAL_INTERVAL",
+        "LITERAL_TIMESTAMP_UTC",
+        "LITERAL_TIMESTAMP",
+        "LITERAL_NULL",
+        "LITERAL_STRING",
+        "LITERAL_DOUBLE",
+        "LITERAL_BOOL"
+    ]
+}
+```
+
+The set of capabilities in the example above would be sufficient to push down all aspects of the following query:
+```sql
+SELECT user_id, count(url) FROM VS.clicks
+  WHERE user_id>1
+  GROUP BY user_id
+  HAVING count(url)>1
+  ORDER BY user_id
+  LIMIT 10;
+```
+
+The whole set of capabilities is a lot longer.
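+On the adapter side such a capability set is typically assembled from the capability enums shipped in this repository. The following sketch is illustrative only (the ```Capabilities``` class and its ```support...``` methods come from ```virtualschema-common``` in this repository; the class name is made up) and registers a subset of the capabilities listed above:
+
+```java
+import com.exasol.adapter.capabilities.*;
+
+public class ExampleAdapterCapabilities {
+
+    // Build the capability set an adapter could report for the example query above.
+    public static Capabilities createCapabilities() {
+        Capabilities caps = new Capabilities();
+        caps.supportMainCapability(MainCapability.SELECTLIST_PROJECTION);
+        caps.supportMainCapability(MainCapability.FILTER_EXPRESSIONS);
+        caps.supportMainCapability(MainCapability.AGGREGATE_GROUP_BY_COLUMN);
+        caps.supportMainCapability(MainCapability.AGGREGATE_HAVING);
+        caps.supportMainCapability(MainCapability.ORDER_BY_COLUMN);
+        caps.supportMainCapability(MainCapability.LIMIT);
+        caps.supportPredicate(PredicateCapability.LESSEQUAL);               // comparisons in WHERE/HAVING
+        caps.supportAggregateFunction(AggregateFunctionCapability.COUNT);   // count(url)
+        caps.supportLiteral(LiteralCapability.EXACTNUMERIC);                // the literal 1
+        return caps;
+    }
+}
+```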
The current list of supported Capabilities can be found in the sources of the JDBC Adapter: +* [High Level Capabilities](../jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/MainCapability.java) +* [Literal Capabilities](../jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/LiteralCapability.java) +* [Predicate Capabilities](../jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/PredicateCapability.java) +* [Scalar Function Capabilities](../jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/ScalarFunctionCapability.java) +* [Aggregate Function Capabilities](../jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/AggregateFunctionCapability.java) + + +### Pushdown + +Contains an abstract specification of what to be pushed down, and requests an pushdown SQL statement from the Adapter which can be used to retrieve the requested data. + +**Request:** + +Running the following query +```sql +SELECT user_id, count(url) FROM VS.clicks + WHERE user_id>1 + GROUP BY user_id + HAVING count(url)>1 + ORDER BY user_id + LIMIT 10; +``` +will produce the following Request, assuming that the Adapter has all required capabilities. + +```json +{ + "type": "pushdown", + "pushdownRequest": { + "type" : "select", + "aggregationType" : "group_by", + "from" : + { + "type" : "table", + "name" : "CLICKS" + }, + "selectList" : + [ + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + }, + { + "type" : "function_aggregate", + "name" : "count", + "arguments" : + [ + { + "type" : "column", + "name" : "URL", + "columnNr" : 2, + "tableName" : "CLICKS" + } + ] + } + ], + "filter" : + { + "type" : "predicate_less", + "left" : + { + "type" : "literal_exactnumeric", + "value" : "1" + }, + "right" : + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + } + }, + "groupBy" : + [ + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + } + ], + "having" : + { + "type" : "predicate_less", + "left" : + { + "type" : "literal_exactnumeric", + "value" : "1" + }, + "right" : + { + "type" : "function_aggregate", + "name" : "count", + "arguments" : + [ + { + "type" : "column", + "name" : "URL", + "columnNr" : 2, + "tableName" : "CLICKS" + } + ] + } + }, + "orderBy" : + [ + { + "type" : "order_by_element", + "expression" : + { + "columnNr" : 1, + "name" : "USER_ID", + "tableName" : "CLICKS", + "type" : "column" + }, + "isAscending" : true, + "nullsLast" : true + } + ], + "limit" : + { + "numElements" : 10 + } + }, + "involvedTables": [ + { + "name" : "CLICKS", + "columns" : + [ + { + "name" : "ID", + "dataType" : + { + "precision" : 18, + "scale" : 0, + "type" : "DECIMAL" + } + }, + { + "name" : "USER_ID", + "dataType" : + { + "precision" : 18, + "scale" : 0, + "type" : "DECIMAL" + } + }, + { + "name" : "URL", + "dataType" : + { + "size" : 1000, + "type" : "VARCHAR" + } + }, + { + "name" : "REQUEST_TIME", + "dataType" : + { + "type" : "TIMESTAMP" + } + } + ] + } + ], + "schemaMetadataInfo": { + ... + } +} +``` + +Notes +* ```pushdownRequest```: Specification what needs to be pushed down. You can think of it like a parsed SQL statement. + * ```from```: The requested from clause. Currently only tables are supported, joins might be supported in future. + * ```selectList```: The requested select list elements, a list of expression. The order of the selectlist elements matters. 
If the select list is an empty list, the database requests at least a single column/expression from the adapter, which could also be the constant TRUE.
+ * ```selectList.columnNr```: Position of the column in the virtual table, starting with 0.
+ * ```filter```: The requested filter (where clause), a single expression.
+ * ```aggregationType```: Optional element, set if an aggregation is requested. Either "group_by" or "single_group" (if an aggregate function is used but there is no group by).
+ * ```groupBy```: The requested group by clause, a list of expressions.
+ * ```having```: The requested having clause, a single expression.
+ * ```orderBy```: The requested order-by clause, a list of ```order_by_element``` elements. The field ```expression``` contains the expression to order by.
+ * ```limit```: The requested limit of the result set, with an optional offset.
+* ```involvedTables```: Metadata of the involved tables, encoded as in schemaMetadata.
+
+
+**Response:**
+
+Following the example above, a valid result could look like this:
+
+```json
+{
+    "type": "pushdown",
+    "sql": "IMPORT FROM JDBC AT 'jdbc:exa:remote-db:8563;schema=native' USER 'sys' IDENTIFIED BY 'exasol' STATEMENT 'SELECT USER_ID, count(URL) FROM NATIVE.CLICKS WHERE 1 < USER_ID GROUP BY USER_ID HAVING 1 < count(URL) ORDER BY USER_ID LIMIT 10'"
+}
+```
+
+Notes
+* ```sql```: The pushdown SQL statement. It must be either a ```SELECT``` or an ```IMPORT``` statement.
+
+## Embedded Commonly Used Json Elements
+
+The following Json objects can be embedded in a request or response. They have a fixed structure.
+
+### Schema Metadata Info
+This document contains the most important metadata of the virtual schema and is sent to the adapter just "for information" with each request. It is the value of an element called ```schemaMetadataInfo```.
+
+```json
+{"schemaMetadataInfo":{
+    "name": "MY_HIVE_VSCHEMA",
+    "adapterNotes": {
+        "lastRefreshed": "2015-03-01 12:10:01",
+        "key": "Any custom schema state here"
+    },
+    "properties": {
+        "HIVE_SERVER": "my-hive-server",
+        "HIVE_DB": "my-hive-db",
+        "HIVE_USER": "my-hive-user"
+    }
+}}
+```
+
+### Schema Metadata
+
+This document is usually embedded in responses from the Adapter and informs the database about all metadata of the Virtual Schema, especially the contained Virtual Tables and their columns. The Adapter can store so-called ```adapterNotes``` on each level (schema, table, column) to remember information which might be relevant for the Adapter in the future. In the example below, the Adapter remembers the table partitioning and the data type of a column which is not directly supported in EXASOL. The Adapter has this information available during pushdown and can consider the table partitioning or add an appropriate cast for the column.
+ +```json +{"schemaMetadata":{ + "adapterNotes": { + "lastRefreshed": "2015-03-01 12:10:01", + "key": "Any custom schema state here" + }, + "tables": [ + { + "type": "table", + "name": "EXASOL_CUSTOMERS", + "adapterNotes": { + "hivePartitionColumns": ["CREATED", "COUNTRY_ISO"] + }, + "columns": [ + { + "name": "ID", + "dataType": { + "type": "DECIMAL", + "precision": 18, + "scale": 0 + }, + "isIdentity": true + }, + { + "name": "COMPANY_NAME", + "dataType": { + "type": "VARCHAR", + "size": 1000, + "characterSet": "UTF8" + }, + "default": "foo", + "isNullable": false, + "comment": "The official name of the company", + "adapterNotes": { + "hiveType": { + "dataType": "List" + } + } + }, + { + "name": "DISCOUNT_RATE", + "dataType": { + "type": "DOUBLE" + } + } + ] + }, + { + "type": "table", + "name": "TABLE_2", + "columns": [ + { + "name": "COL1", + "dataType": { + "type": "DECIMAL", + "precision": 18, + "scale": 0 + } + }, + { + "name": "COL2", + "dataType": { + "type": "VARCHAR", + "size": 1000 + } + } + ] + } + ] +}} +``` + +Notes +* ```adapterNotes``` is an optional field which can be attached to the schema, a table or a column. It can be an arbitrarily nested Json document. + +The following EXASOL data types are supported: + +**Decimal:** + +```json +{ + "name": "C_DECIMAL", + "dataType": { + "type": "DECIMAL", + "precision": 18, + "scale": 2 + } +} +``` + +**Double:** + +```json +{ + "name": "C_DOUBLE", + "dataType": { + "type": "DOUBLE" + } +} +``` + +**Varchar:** + +```json +{ + "name": "C_VARCHAR_UTF8_1", + "dataType": { + "type": "VARCHAR", + "size": 10000, + "characterSet": "UTF8" + } +} +``` + +```json +{ + "name": "C_VARCHAR_UTF8_2", + "dataType": { + "type": "VARCHAR", + "size": 10000 + } +} +``` + +```json +{ + "name": "C_VARCHAR_ASCII", + "dataType": { + "type": "VARCHAR", + "size": 10000, + "characterSet": "ASCII" + } +} +``` + +**Char:** + +```json +{ + "name": "C_CHAR_UTF8_1", + "dataType": { + "type": "CHAR", + "size": 3 + } +} +``` + +```json +{ + "name": "C_CHAR_UTF8_2", + "dataType": { + "type": "CHAR", + "size": 3, + "characterSet": "UTF8" + } +} +``` + +```json +{ + "name": "C_CHAR_ASCII", + "dataType": { + "type": "CHAR", + "size": 3, + "characterSet": "ASCII" + } +} +``` + +**Date:** + +```json +{ + "name": "C_DATE", + "dataType": { + "type": "DATE" + } +} +``` + +**Timestamp:** + +```json +{ + "name": "C_TIMESTAMP_1", + "dataType": { + "type": "TIMESTAMP" + } +} +``` +```json +{ + "name": "C_TIMESTAMP_2", + "dataType": { + "type": "TIMESTAMP", + "withLocalTimeZone": false + } +} +``` +```json +{ + "name": "C_TIMESTAMP_3", + "dataType": { + "type": "TIMESTAMP", + "withLocalTimeZone": true + } +} +``` + +**Boolean:** + +```json +{ + "name": "C_BOOLEAN", + "dataType": { + "type": "BOOLEAN" + } +} +``` + +**Geometry:** + +```json +{ + "name": "C_GEOMETRY", + "dataType": { + "type": "GEOMETRY", + "srid": 1 + } +} +``` +**Interval:** +```json +{ + "name": "C_INTERVAL_DS_1", + "dataType": { + "type": "INTERVAL", + "fromTo": "DAY TO SECONDS" + } +} +``` + +```json +{ + "name": "C_INTERVAL_DS_2", + "dataType": { + "type": "INTERVAL", + "fromTo": "DAY TO SECONDS", + "precision": 3, + "fraction": 4 + } +} +``` + +```json +{ + "name": "C_INTERVAL_YM_1", + "dataType": { + "type": "INTERVAL", + "fromTo": "YEAR TO MONTH" + } +} +``` + +```json +{ + "name": "C_INTERVAL_YM_2", + "dataType": { + "type": "INTERVAL", + "fromTo": "YEAR TO MONTH", + "precision": 3 + } +} +``` + + +## Expressions + +This section handles the expressions that can occur in a pushdown request. 
Expressions are consistently encoded in the following way. This allows easy and consistent parsing and serialization.
+
+```json
+{
+    "type": "<expression-type>",
+    ...
+}
+```
+
+Each expression-type can have any number of additional fields of arbitrary type. In the following sections we define the known expressions.
+
+### Table
+
+This element currently only occurs in the from clause.
+
+```json
+{
+    "type": "table",
+    "name": "CLICKS"
+}
+```
+
+### Column Lookup
+
+```json
+{
+    "type": "column",
+    "tableName": "T",
+    "tablePosFromClause": 0,
+    "columnNr": 0,
+    "name": "ID"
+}
+```
+
+Notes
+* **tablePosFromClause**: Position of the table in the from clause, starting with 0. Required for joins where the same table occurs several times.
+* **columnNr**: Column number in the virtual table, starting with 0.
+
+### Literal
+
+```json
+{
+    "type": "literal_null"
+}
+```
+
+```json
+{
+    "type": "literal_string",
+    "value": "my string"
+}
+```
+
+```json
+{
+    "type": "literal_double",
+    "value": "1.234"
+}
+```
+
+```json
+{
+    "type": "literal_exactnumeric",
+    "value": "12345"
+}
+```
+
+```json
+{
+    "type": "literal_bool",
+    "value": true
+}
+```
+
+```json
+{
+    "type": "literal_date",
+    "value": "2015-12-01"
+}
+```
+
+```json
+{
+    "type": "literal_timestamp",
+    "value": "2015-12-01 12:01:01.1234"
+}
+```
+
+```json
+{
+    "type": "literal_timestamputc",
+    "value": "2015-12-01 12:01:01.1234"
+}
+```
+
+### Predicates
+
+Whenever there is ```...``` this is a shortcut for an arbitrary expression.
+
+```json
+{
+    "type": "predicate_and",
+    "expressions": [
+        ...
+    ]
+}
+```
+
+The same can be used for "predicate_or".
+
+```json
+{
+    "type": "predicate_not",
+    "expression": {
+        ...
+    }
+}
+```
+
+```json
+{
+    "type": "predicate_equals",
+    "left": {
+        ...
+    },
+    "right": {
+        ...
+    }
+}
+```
+
+The same can be used for "predicate_notequals", "predicate_less" and "predicate_lessequals".
+
+```json
+{
+    "type": "predicate_like",
+    "expression": {
+        ...
+    },
+    "pattern": {
+        ...
+    },
+    "escapeChar": "%"
+}
+```
+
+The same can be used for predicate_like_regexp.
+
+Notes
+* **escapeChar** is optional.
+
+```json
+{
+    "type": "predicate_between",
+    "expression": {
+        ...
+    },
+    "left": {
+        ...
+    },
+    "right": {
+        ...
+    }
+}
+```
+
+```<exp> IN (<const1>, ..., <constN>)```
+
+```json
+{
+    "type": "predicate_in_constlist",
+    "expression": {
+        ...
+    },
+    "arguments": [
+        ...
+    ]
+}
+```
+
+### Scalar Functions
+
+Single argument (consistent with the multiple-argument version):
+
+```json
+{
+    "type": "function_scalar",
+    "numArgs": 1,
+    "name": "ABS",
+    "arguments": [
+        {
+            ...
+        }
+    ]
+}
+```
+
+Multiple arguments:
+
+```json
+{
+    "type": "function_scalar",
+    "numArgs": 2,
+    "name": "POWER",
+    "arguments": [
+        {
+            ...
+        },
+        {
+            ...
+        }
+    ]
+}
+```
+
+Variable number of arguments:
+
+```json
+{
+    "type": "function_scalar",
+    "variableInputArgs": true,
+    "name": "CONCAT",
+    "arguments": [
+        {
+            ...
+        },
+        {
+            ...
+        },
+        {
+            ...
+        }
+    ]
+}
+```
+
+Notes
+* **variableInputArgs**: The default value is false. If true, numArgs is not defined.
+
+Arithmetic operators have the following names: ADD, SUB, MULT, FLOAT_DIV. They are marked as infix (just a hint, not necessary):
+
+```json
+{
+    "type": "function_scalar",
+    "numArgs": 2,
+    "name": "ADD",
+    "infix": true,
+    "arguments": [
+        {
+            ...
+        },
+        {
+            ...
+        }
+    ]
+}
+```
+
+### Aggregate Functions
+
+Consistent with scalar functions. To be detailed: star-operator, distinct, ...
+
+```json
+{
+    "type": "function_aggregate",
+    "name": "SUM",
+    "arguments": [
+        {
+            ...
+ } + ] +} +``` + +```json +{ + "type": "function_aggregate", + "name": "CORR", + "arguments": [ + { + ... + }, + { + ... + } + ] +} +``` + +**Special cases** + +COUNT(exp) (requires set-function capability COUNT) + +```json +{ + "type": "function_aggregate", + "name": "COUNT", + "arguments": [ + { + ... + } + ] +} +``` + +COUNT(*) (requires set-function capability COUNT and COUNT_STAR) + +```json +{ + "type": "function_aggregate", + "name": "COUNT" +} +``` + +COUNT(DISTINCT exp) (requires set-function capability COUNT and COUNT_DISTINCT) + +```json +{ + "type": "function_aggregate", + "name": "COUNT", + "distinct": true, + "arguments": [ + { + ... + } + ] +} +``` + +COUNT((exp1, exp2)) (requires set-function capability COUNT and COUNT_TUPLE) + +```json +{ + "type": "function_aggregate", + "name": "COUNT", + "distinct": true, + "arguments": [ + { + ... + }, + { + ... + } + ] +} +``` + +GROUP_CONCAT(DISTINCT exp1 SEPARATOR ', ') + +```json +{ + "type": "function_aggregate_group_concat", + "name": "GROUP_CONCAT", + "distinct": true, + "separator": ", ", + "arguments": [ + { + ... + } + ] +} +``` + +AVG is analogous (with distinct option) diff --git a/jdbc-adapter/README.md b/jdbc-adapter/README.md new file mode 100644 index 000000000..9663108c2 --- /dev/null +++ b/jdbc-adapter/README.md @@ -0,0 +1,152 @@ +# JDBC Adapter for Virtual Schemas + +## Overview +This is an adapter for virtual schemas to connect to JDBC data sources, like Hive or Oracle or any other. It serves as the reference adapter for the virtual schema framework. + +If you are interested in a introduction to virtual schemas please refer to our [virtual schemas documentation](../doc). + + +## Deploying the Adapter + +Run the following steps to deploy your adapter: + +### Prerequisites: +* EXASOL >= 6.0 +* Advanced edition (which includes the ability to execute adapter scripts) + +### Build: + +To build a fat jar (including all dependencies) run: +``` +mvn clean -DskipTests package +``` + +The resulting fat jar is stored in ```virtualschema-jdbc-adapter-dist/target/virtualschema-jdbc-adapter-dist-0.0.1-SNAPSHOT.jar```. + +### Upload Adapter jar + +You have to upload the jar of the adapter to a bucket of your choice. This will allow using the jar in the adapter script. See https://github.com/EXASOL/script-language-plugins#authentication-uploading-and-deleting-files for how to use BucketFS. + +### Upload JDBC Driver files + +You have to upload the JDBC driver files of your remote database two times: First into a bucket of your choice, so that they can be accessed from the adapter script. Second you have to upload the files as a JDBC driver in EXAOperation (under Software -> JDBC Drivers). + +### Deploy Adapter Script +Then run the following SQL commands to deploy the adapter in the database: +```sql +-- The adapter is simply a script. It has to be stored in any regular schema. +CREATE SCHEMA adapter; +CREATE JAVA ADAPTER SCRIPT adapter.jdbc_adapter AS + + // This is the class implementing the callback method of the adapter script + %scriptclass com.exasol.adapter.jdbc.JdbcAdapter; + + // This will add the adapter jar to the classpath so that it can be used inside the adapter script + // Replace the names of the bucketfs and the bucket with the ones you used. + %jar /buckets/your-bucket-fs/your-bucket/virtualschema-jdbc-adapter-dist-0.0.1-SNAPSHOT.jar; + + // You have to add all files of the data source jdbc driver here (e.g. 
MySQL or Hive)
+  %jar /buckets/your-bucket-fs/your-bucket/name-of-data-source-jdbc-driver.jar;
+/
+```
+
+
+## Using the Adapter
+Create a virtual schema:
+```sql
+CREATE CONNECTION hive_conn TO 'jdbc:hive2://localhost:10000/default' USER 'hive-usr' IDENTIFIED BY 'hive-pwd';
+
+CREATE VIRTUAL SCHEMA hive USING adapter.jdbc_adapter WITH
+  SQL_DIALECT     = 'HIVE'
+  CONNECTION_NAME = 'HIVE_CONN'
+  SCHEMA_NAME     = 'default';
+```
+
+Explore the virtual schema:
+```sql
+OPEN SCHEMA hive;
+SELECT * FROM cat;
+DESCRIBE clicks;
+```
+
+Run queries:
+```sql
+SELECT count(*) FROM clicks;
+SELECT DISTINCT USER_ID FROM clicks;
+```
+
+Or refresh the schema's metadata, e.g. if tables were added in the remote system:
+```sql
+ALTER VIRTUAL SCHEMA hive REFRESH;
+ALTER VIRTUAL SCHEMA hive REFRESH TABLES t1 t2; -- refresh only these tables
+```
+
+Or set properties. This might update the metadata (if you change the remote database) or not:
+```sql
+ALTER VIRTUAL SCHEMA hive SET TABLE_FILTER='CUSTOMERS, CLICKS';
+```
+
+Or unset properties:
+```sql
+ALTER VIRTUAL SCHEMA hive SET TABLE_FILTER=null;
+```
+
+Or drop the schema:
+```sql
+DROP VIRTUAL SCHEMA hive CASCADE;
+```
+
+
+
+### Adapter Properties
+Note that properties are always strings, like `TABLE_FILTER='T1,T2'`.
+
+**Mandatory Properties:**
+
+Parameter                   | Value
+--------------------------- | -----------
+**SQL_DIALECT**             | Name of the SQL dialect, e.g. EXASOL, IMPALA, ORACLE or GENERIC (case insensitive). For some SQL dialects we have presets which are used for the pushdown SQL query generation. If you try to generate a virtual schema without specifying this property you will see all available dialects in the error message.
+**CONNECTION_NAME**         | Name of the connection created with ```CREATE CONNECTION``` which contains the jdbc connection string, the username and password. You don't need to set CONNECTION_STRING, USERNAME and PASSWORD if you define this property. We recommend this to ensure that passwords are not shown in the logfiles.
+**CONNECTION_STRING**       | The jdbc connection string. Only required if CONNECTION_NAME is not set.
+
+
+**Typical Optional Parameters:**
+
+Parameter                   | Value
+--------------------------- | -----------
+**CATALOG_NAME**            | The name of the remote jdbc catalog. This is usually case-sensitive, depending on the dialect. Whether you have to specify it also depends on the dialect; usually you have to specify it if the data source JDBC driver supports the concept of catalogs.
+**SCHEMA_NAME**             | The name of the remote jdbc schema. This is usually case-sensitive, depending on the dialect. Whether you have to specify it also depends on the dialect; usually you have to specify it if the data source JDBC driver supports the concept of schemas.
+**USERNAME**                | Username for authentication. Can only be set if CONNECTION_NAME is not set.
+**PASSWORD**                | Password for authentication. Can only be set if CONNECTION_NAME is not set.
+
+
+**Advanced Optional Properties:**
+
+Parameter                   | Value
+--------------------------- | -----------
+**TABLE_FILTER**            | A comma-separated list of table names (case-sensitive). Only these tables will be available, other tables are ignored. Use this if you don't want to have all remote tables in your virtual schema.
+**IMPORT_FROM_EXA**         | Either 'TRUE' or 'FALSE' (default). If true, IMPORT FROM EXA will be used for the pushdown instead of IMPORT FROM JDBC. You have to define EXA_CONNECTION_STRING if this property is true.
+**EXA_CONNECTION_STRING**   | The connection string used for IMPORT FROM EXA in the format 'hostname:port'.
+**DEBUG_ADDRESS**           | The IP address/hostname and port of the UDF debugging service, e.g. 'myhost:3000'. Debug output from the UDFs will be sent to this address. See the section on debugging below.
+**IS_LOCAL**                | Either 'TRUE' or 'FALSE' (default). If true, you are connecting to the local EXASOL database (e.g. for testing purposes). In this case, the adapter can avoid the IMPORT FROM JDBC overhead.
+
+
+
+## Debugging
+To see all communication between the database and the adapter you can use the python script udf_debug.py.
+
+First, start the udf_debug.py script, which will listen on the specified address and print all incoming text.
+```
+python tools/udf_debug.py -s myhost -p 3000
+```
+Then set the DEBUG_ADDRESS property so that the adapter will send debug output to the specified address.
+```sql
+ALTER VIRTUAL SCHEMA vs SET DEBUG_ADDRESS='myhost:3000'
+```
+
+
+
+
+## Frequent Issues
+* **Error: No suitable driver found for jdbc...**: The jdbc driver class was not discovered automatically. Either you have to add a META-INF/services/java.sql.Driver file with the classname to your jar, or you have to load the driver manually (see JdbcMetadataReader.readRemoteMetadata()).
+See https://docs.oracle.com/javase/7/docs/api/java/sql/DriverManager.html
diff --git a/jdbc-adapter/develop-dialect.md b/jdbc-adapter/develop-dialect.md
new file mode 100644
index 000000000..0fdb34cac
--- /dev/null
+++ b/jdbc-adapter/develop-dialect.md
@@ -0,0 +1,76 @@
+# How to develop and test a dialect
+This page describes how you can develop and semi-automatically test a dialect for the JDBC adapter. The integration tests are work in progress.
+
+## How to develop a dialect
+We recommend the following steps for the development of a dialect.
+Please look at the source code of ```com.exasol.adapter.dialects.SqlDialect``` for the methods you can override.
+You can also have a look at the implementation of an existing dialect for inspiration.
+
+### Setup data source
+* Setup and start the database
+* Testdata: Create a test schema with a simple table (simple data types)
+
+### Setup EXASOL
+* Setup and start an EXASOL database with the virtual schemas feature
+* Upload the JDBC drivers of the data source via EXAOperation
+* Manual test: query data from the data source via IMPORT FROM JDBC
+
+### Catalog, Schema & Table Mapping
+* Override the SqlDialect methods for catalog, schema and table mapping
+* Manual test: create a virtual schema by specifying the catalog and/or schema.
+
+### Data Type Mapping
+* Testdata: Create a table with all data types and at least one row of data
+* Override the SqlDialect method for data type mapping
+* Automatic test: sys tables show virtual table and columns with correctly mapped type
+* Automatic test: running SELECT on the virtual table returns the expected result
+
+### Identifier Case Handling & Quoting
+* Testdata: Create a schema/table/column with mixed case (if supported)
+* Automatic test: sys tables correct
+* Automatic test: SELECT works as expected
+
+### Projection Capability
+* Add capability
+* Automatic test: pushed down & correct result (incl. EXPLAIN VIRTUAL). Also test with mixed case columns.
+
+### Predicates and Literal Capabilities
+* Add capabilities for supported literals and predicates (e.g. c1='foo')
+* Automatic test: pushed down & correct result (incl.
EXPLAIN VIRTUAL) for all predicates & literals + +### Aggregation & Set Function Capabilities +* Add capabilities for aggregations and aggregation functions +* Automatic test: pushed down & correct result (incl. EXPLAIN VIRTUAL) for all set functions + +### Order By / Limit Capabilities +* Testdata: Create a table with null values and non-null values, to check null collation. +* Add capabilities for order by and/or limit +* Automatic test: pushed down & correct result (incl. EXPLAIN VIRTUAL) +* Automatic test: default null collation, explicit NULLS FIRST/LAST + +### Scalar Function Capabilities +* Add capabilities for scalar functions +* Automatic test: pushed down & correct result (incl. EXPLAIN VIRTUAL) + +### Views +* Testdata: Create a simple view, e.g. joining two existing tables +* Automatic test: Query the view, optionally e.g. with a filter. + +## How to start integration tests +We assume that you have a running EXASOL and data source database with all required test tables. + +We use following Maven phases for our integration tests: +* pre-integration-test phase is used to automatically deploy the latest jdbc adapter jar (based on your latest code modifications) +* integration-test phase is used to execute the actual integration tests + +Note that to check whether the integration-tests were successful, you have to run the verify Maven phase. + +You can start the integration tests as follows: +``` +mvn clean package && mvn verify -Pit -Dintegrationtest.configfile=/path/to/your/integration-test-config.yaml +``` + +This will run all integration tests, i.e. all junit tests with the suffix "IT" in the filename. The yaml configuration file stores the information for your test environment like jdbc connection strings, paths and credentials. + + + diff --git a/jdbc-adapter/integration-test-data/impala-testdata.sql b/jdbc-adapter/integration-test-data/impala-testdata.sql new file mode 100644 index 000000000..dccf2ee41 --- /dev/null +++ b/jdbc-adapter/integration-test-data/impala-testdata.sql @@ -0,0 +1,55 @@ + +-- Execute following statements in Impala: +create table all_hive_impala_types ( + c1 tinyint, + c2 smallint, + c3 int, + c4 bigint, + c5 float, + c6 double, + c7 decimal, + c8 decimal(12,2), + c9 decimal(38,2), + c10 timestamp, + c11 string, + c12 varchar(1000), + c13 char(10), + c14 boolean +); + +insert into all_hive_impala_types values ( + 123, + 12345, + 1234567890, + 1234567890123456789, + 12.2, + 12.2, + 12345, + 12345.12, + 12345.12, + '1985-09-25 17:45:30.005', + 'abc', + CAST('varchar 茶' AS VARCHAR(1000)), + CAST('char 茶' AS CHAR(10)), + true +); + +CREATE TABLE simple(a int, b string, c double); + +INSERT INTO simple VALUES + (1, 'a', 1.1), + (2, 'b', 2.2), + (3, 'c', 3.3), + (1, 'd', 4.4), + (2, 'e', 5.5), + (3, 'f', 6.6); + +CREATE TABLE simple_with_nulls (c1 int, c2 string); + +INSERT INTO simple_with_nulls VALUES + (1, 'a'), + (2, null), + (3, 'b'), + (1, null), + (null, 'c'); + diff --git a/jdbc-adapter/integration-test-sample.yaml b/jdbc-adapter/integration-test-sample.yaml new file mode 100644 index 000000000..e8eee9ab9 --- /dev/null +++ b/jdbc-adapter/integration-test-sample.yaml @@ -0,0 +1,67 @@ +# Configuration file for integration tests + +general: + debug: false + jdbcAdapterPath: /buckets/mybucketfs/mybucket/jdbc-adapter/virtualschema-jdbc-adapter-dist-0.0.1-SNAPSHOT.jar + scpTargetPath: 'ws64-2.dev.exasol.com:/x/u/ah1597/jdbc-adapter/integrationtest/' + debugAddress: '192.168.0.12:3000' # Address which will be defined as DEBUG_ADDRESS in the virtual schemas + 
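+# Each section below configures one test database; its runIntegrationTests flag controls whether that dialect is tested.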
+exasol:
+  runIntegrationTests: true
+  address: exasol-host:1234
+  user: sys
+  password: exasol
+
+# Generic sql dialect is tested via MySQL
+generic:
+  runIntegrationTests: false
+  jdbcDriverPath: /buckets/mybucketfs/mybucket/mysql/mysql-connector-java-5.1.38-bin.jar
+  connectionString: jdbc:mysql://mysql-host/mydatabase
+  user: myuser
+  password: mypass
+
+oracle:
+  runIntegrationTests: false
+  jdbcDriverPath: /buckets/mybucketfs/mybucket/oracle/ojdbc7.jar
+  connectionString: jdbc:oracle:thin:@oracle-host:1521:orcl
+  user: myuser
+  password: mypass
+
+
+impala:
+  runIntegrationTests: false
+  connectionString: jdbc:impala://impala-host:21050;AuthMech=0
+  jdbcDriverPath: /buckets/mybucketfs/mybucket/Cloudera_Impala_JDBC_2_5_28.1047_Driver/
+  jdbcDriverJars:
+    - hive_metastore.jar
+    - hive_service.jar
+    - ImpalaJDBC41.jar
+    - libfb303-0.9.0.jar
+    - libthrift-0.9.0.jar
+    - log4j-1.2.14.jar
+    - ql.jar
+    - slf4j-api-1.5.11.jar
+    - slf4j-log4j12-1.5.11.jar
+    - TCLIServiceClient.jar
+    - zookeeper-3.4.6.jar
+
+
+kerberos:
+  runIntegrationTests: false
+  jdbcDriverPath: /buckets/mybucketfs/mybucket/cloudera-hive-jdbc-driver/
+  jdbcDriverJars:
+    - HiveJDBC41.jar
+    - hive_metastore.jar
+    - hive_service.jar
+    - libfb303-0.9.0.jar
+    - libthrift-0.9.0.jar
+    - log4j-1.2.14.jar
+    - ql.jar
+    - slf4j-api-1.5.11.jar
+    - slf4j-log4j12-1.5.11.jar
+    - TCLIServiceClient.jar
+    - zookeeper-3.4.6.jar
+  connectionString: jdbc:hive2://hadoop-host.yourcompany.com:10000/;AuthMech=1;KrbRealm=YOURCOMPANY.COM;KrbHostFQDN=hadoop-host.yourcompany.com;KrbServiceName=hive
+  user: testuser@YOURCOMPANY.COM
+  password: ExaAuthType=Kerberos;X3xpYmRlZmF1bHRzXQpkZWZhdWx0X3JlYWxtID0gT01HLkRFVi5FWEFTT0wuQ09NCmRuc19jYW5vbmljYWxpemVfaG9zdG5hbWUgPSBmYWxzZQpkbnNfbG9va3VwX2tkYyA9IGZhbHNlCmRuc19sb29rdXBfcmVhbG0gPSBmYWxzZQp0aWNrZXRfbGlmZXRpbWUgPSA4NjQwMApyZW5ld19saWZldGltZSA9IDYwNDgwMApmb3J3YXJkYWJsZSA9IHRydWUKZGVmYXVsdF90Z3NfZW5jdHlwZXMgPSBhcmNmb3VyLWhtYWMKZGVmYXVsdF90a3RfZW5jdHlwZXMgPSBhcmNmb3VyLWhtYWMKcGVybWl0dGVkX2VuY3R5cGVzID0gYXJjZm91ci1obWFjCnVkcF9wcmVmZXJlbmNlX2xpbWl0ID0gMQpbcmVhbG1zXQpPTUcuREVWLkVYQVNPTC5DT00gPSB7CmtkYyA9IGhhZG9vcDAxLm9tZy5kZXYuZXhhc29sLmNvbQphZG1pbl9zZXJ2ZXIgPSBoYWRvb3AwMS5vbWcuZGV2LmV4YXNvbC5jb20KfQo=;BQIAAABBAAEAEk9NRy5ERVYuRVhBU09MLkNPTQAMaGFkb29wdGVzdGVyAAAAAVYo0X0BABcAEGuPtGr6sYdhUEbTqhYQ3E0=
+
diff --git a/jdbc-adapter/pom.xml b/jdbc-adapter/pom.xml
new file mode 100644
index 000000000..5bebf69b6
--- /dev/null
+++ b/jdbc-adapter/pom.xml
@@ -0,0 +1,70 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>com.exasol</groupId>
+    <artifactId>virtualschema-jdbc-adapter-main</artifactId>
+    <version>0.0.1-SNAPSHOT</version>
+
+    <packaging>pom</packaging>
+
+    <modules>
+        <module>virtualschema-common</module>
+        <module>virtualschema-jdbc-adapter</module>
+        <module>virtualschema-jdbc-adapter-dist</module>
+    </modules>
+
+    <properties>
+        <java.version>1.7</java.version>
+    </properties>
+
+    <repositories>
+        <repository>
+            <id>maven.exasol.com</id>
+            <url>https://maven.exasol.com/artifactory/exasol-releases</url>
+            <snapshots>
+                <enabled>false</enabled>
+            </snapshots>
+        </repository>
+        <repository>
+            <id>maven.exasol.com-snapshots</id>
+            <url>https://maven.exasol.com/artifactory/exasol-snapshots</url>
+            <snapshots>
+                <enabled>true</enabled>
+            </snapshots>
+        </repository>
+    </repositories>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>18.0</version>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.11</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+            <version>2.0.52-beta</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.2</version>
+                <configuration>
+                    <source>${java.version}</source>
+                    <target>${java.version}</target>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/jdbc-adapter/tools/udf_debug.py b/jdbc-adapter/tools/udf_debug.py
new file mode 100644
index 000000000..b6beb15e6
--- /dev/null
+++ b/jdbc-adapter/tools/udf_debug.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+
+
+import sys
+import socket
+import asyncore, asynchat
+
+from threading import Thread
+
+class ScriptOutputThread(Thread):
+    def init(this):
+        class log_server(asyncore.dispatcher):
+            def __init__(self):
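+                # Standard asyncore setup: open a TCP socket, bind it to the requested address
+                # (port 0 makes the OS pick a free port, which we read back) and listen for clients.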
+                asyncore.dispatcher.__init__(self)
+                self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+                self.bind(this.serverAddress)
+                if this.serverAddress[1] == 0:
+                    this.serverAddress = (this.serverAddress[0], self.socket.getsockname()[1])
+                self.listen(10)
+            def handle_accept(self):
+                log_handler(*self.accept())
+            def handle_close(self):
+                self.close()
+
+        class log_handler(asynchat.async_chat):
+            def __init__(self, sock, address):
+                asynchat.async_chat.__init__(self, sock = sock)
+                self.set_terminator("\n")
+                self.address = "%s:%d" % address
+                self.ibuffer = []
+            def collect_incoming_data(self, data):
+                self.ibuffer.append(data)
+            def found_terminator(self):
+                this.fileObject.write("%s> %s\n" % (self.address, ''.join(self.ibuffer).rstrip()))
+                self.ibuffer = []
+
+        this.serv = log_server()
+
+    def run(self):
+        try:
+            while not self.finished:
+                asyncore.loop(timeout = 1, count = 1)
+        finally:
+            self.serv.close()
+            del self.serv
+            asyncore.close_all()
+
+def outputService():
+    """Start a standalone output service
+
+    This service can be used from another Python or R instance; for
+    Python instances the connection parameter externalClient needs to
+    be specified.
+    """
+    try: host = socket.gethostbyname(socket.gethostname())
+    except: host = '0.0.0.0'
+
+    from optparse import OptionParser
+    parser = OptionParser(description =
+        """This script binds to IP and port and outputs everything it gets from
+        the connections to stdout with all lines prefixed with client address.""")
+    parser.add_option("-s", "--server", dest="server", metavar="SERVER", type="string",
+                      default=host,
+                      help="hostname or IP address to bind to (default: %default)")
+    parser.add_option("-p", "--port", dest="port", metavar="PORT", type="int", default=3000,
+                      help="port number to bind to (default: %default)")
+    options = parser.parse_args()[0]
+    address = options.server, options.port
+    sys.stdout.flush()
+    server = ScriptOutputThread()
+    server.serverAddress = address
+    server.fileObject = sys.stdout
+    server.finished = False
+    server.init()
+    print ">>> bind the output server to %s:%d" % server.serverAddress
+    sys.stdout.flush()
+    try: server.run()
+    except KeyboardInterrupt:
+        sys.stdout.flush()
+        sys.exit(0)
+
+
+
+outputService()
diff --git a/jdbc-adapter/virtualschema-common/pom.xml b/jdbc-adapter/virtualschema-common/pom.xml
new file mode 100644
index 000000000..0a9511f4d
--- /dev/null
+++ b/jdbc-adapter/virtualschema-common/pom.xml
@@ -0,0 +1,27 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>com.exasol</groupId>
+        <artifactId>virtualschema-jdbc-adapter-main</artifactId>
+        <version>0.0.1-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>virtualschema-common</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.glassfish</groupId>
+            <artifactId>javax.json</artifactId>
+            <version>1.0.4</version>
+        </dependency>
+        <dependency>
+            <groupId>com.exasol</groupId>
+            <artifactId>exasol-script-api</artifactId>
+            <version>6.0-SNAPSHOT</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/AggregateFunctionCapability.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/AggregateFunctionCapability.java
new file mode 100644
index 000000000..47e07928f
--- /dev/null
+++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/AggregateFunctionCapability.java
@@ -0,0 +1,69 @@
+package com.exasol.adapter.capabilities;
+
+import com.exasol.adapter.sql.AggregateFunction;
+
+/**
+ * List of all aggregation function capabilities supported by EXASOL.
+ */
+public enum AggregateFunctionCapability {
+
+    // required for any kind of COUNT(...) with expressions
+    COUNT,
+    // required only for COUNT(*)
+    COUNT_STAR (AggregateFunction.COUNT),
+    // required for COUNT(DISTINCT ...)
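+    // (like COUNT_STAR, this maps onto the same AggregateFunction.COUNT)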
+ COUNT_DISTINCT (AggregateFunction.COUNT), + // Note that the pushdown of grouping by a tuple of expressions is not currently supported in EXASOL. Example: COUNT([ALL|DISCINCT) (exp1, exp2)) + + SUM, + SUM_DISTINCT (AggregateFunction.SUM), + MIN, + MAX, + AVG, + AVG_DISTINCT (AggregateFunction.AVG), + + MEDIAN, + + FIRST_VALUE, + LAST_VALUE, + + STDDEV, + STDDEV_DISTINCT (AggregateFunction.STDDEV), + STDDEV_POP, + STDDEV_POP_DISTINCT (AggregateFunction.STDDEV_POP), + STDDEV_SAMP, + STDDEV_SAMP_DISTINCT (AggregateFunction.STDDEV_SAMP), + + VARIANCE, + VARIANCE_DISTINCT (AggregateFunction.VARIANCE), + VAR_POP, + VAR_POP_DISTINCT (AggregateFunction.VAR_POP), + VAR_SAMP, + VAR_SAMP_DISTINCT (AggregateFunction.VAR_SAMP), + + GROUP_CONCAT, + GROUP_CONCAT_DISTINCT (AggregateFunction.GROUP_CONCAT), + GROUP_CONCAT_SEPARATOR (AggregateFunction.GROUP_CONCAT), + GROUP_CONCAT_ORDER_BY (AggregateFunction.GROUP_CONCAT), + + GEO_INTERSECTION_AGGREGATE, + GEO_UNION_AGGREGATE, + + APPROXIMATE_COUNT_DISTINCT; + + + + private AggregateFunction function; + + AggregateFunctionCapability() { + this.function = AggregateFunction.valueOf(this.name()); + } + + AggregateFunctionCapability(AggregateFunction function) { + this.function = function; + } + + public AggregateFunction getFunction() { + return function; + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/Capabilities.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/Capabilities.java new file mode 100644 index 000000000..06c5e3abc --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/Capabilities.java @@ -0,0 +1,95 @@ +package com.exasol.adapter.capabilities; + +import com.exasol.adapter.sql.AggregateFunction; +import com.exasol.adapter.sql.Predicate; + +import java.util.HashSet; +import java.util.Set; + +/** + * Manages a set of supported Capabilities + */ +public class Capabilities { + + private Set mainCapabilities = new HashSet<>(); + private Set scalarFunctionCaps = new HashSet<>(); + private Set predicateCaps = new HashSet<>(); + private Set aggregateFunctionCaps = new HashSet<>(); + private Set literalCaps = new HashSet<>(); + + public void supportAllCapabilities() { + for (MainCapability cap : MainCapability.values()) { + supportMainCapability(cap); + } + for (ScalarFunctionCapability function : ScalarFunctionCapability.values()) { + supportScalarFunction(function); + } + for (PredicateCapability pred : PredicateCapability.values()) { + supportPredicate(pred); + } + for (AggregateFunctionCapability function : AggregateFunctionCapability.values()) { + supportAggregateFunction(function); + } + for (LiteralCapability cap : LiteralCapability.values()) { + supportLiteral(cap); + } + } + + public void subtractCapabilities(Capabilities capabilitiesToSubtract) { + for (MainCapability cap : capabilitiesToSubtract.mainCapabilities) { + mainCapabilities.remove(cap); + } + for (ScalarFunctionCapability cap : capabilitiesToSubtract.getScalarFunctionCapabilities()) { + scalarFunctionCaps.remove(cap); + } + for (PredicateCapability cap : capabilitiesToSubtract.getPredicateCapabilities()) { + predicateCaps.remove(cap); + } + for (AggregateFunctionCapability cap : capabilitiesToSubtract.getAggregateFunctionCapabilities()) { + aggregateFunctionCaps.remove(cap); + } + for (LiteralCapability cap : capabilitiesToSubtract.getLiteralCapabilities()) { + literalCaps.remove(cap); + } + } + + public void supportMainCapability(MainCapability cap) { 
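+        // The capability sets are backed by HashSets, so registering a capability twice is harmless.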
+ mainCapabilities.add(cap); + } + + public void supportScalarFunction(ScalarFunctionCapability functionType) { + scalarFunctionCaps.add(functionType); + } + + public void supportPredicate(PredicateCapability predicate) { + predicateCaps.add(predicate); + } + + public void supportAggregateFunction(AggregateFunctionCapability functionType) { + aggregateFunctionCaps.add(functionType); + } + + public void supportLiteral(LiteralCapability literal) { + literalCaps.add(literal); + } + + public Set getMainCapabilities() { + return mainCapabilities; + } + + public Set getScalarFunctionCapabilities() { + return scalarFunctionCaps; + } + + public Set getPredicateCapabilities() { + return predicateCaps; + } + + public Set getAggregateFunctionCapabilities() { + return aggregateFunctionCaps; + } + + public Set getLiteralCapabilities() { + return literalCaps; + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/LiteralCapability.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/LiteralCapability.java new file mode 100644 index 000000000..12946acf3 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/LiteralCapability.java @@ -0,0 +1,65 @@ +package com.exasol.adapter.capabilities; + +/** + * This is an enumeration of the capabilities for literals supported by the EXASOL Virtual Schema Framework. + * + *

E.g. to execute a query like "SELECT * FROM t WHERE username='alice'", your data source needs the {@link #STRING} capability.

+ */ +public enum LiteralCapability { + + /** + * The literal for null values. + *

Example in EXASOL syntax: NULL

+ */ + NULL, + + /** + * The literal for boolean values. + *

Example in EXASOL syntax: TRUE

+ */ + BOOL, + + /** + * The literal for date values. + *

Example in EXASOL syntax: DATE '2000-01-28'

+ */ + DATE, + + /** + * The literal for timestamp values. + *

Example in EXASOL syntax: TIMESTAMP '2000-01-28 12:30:01.001'

+ */ + TIMESTAMP, + + /** + * The literal for UTC timestamp values. There is no direct literal for this type, but it can be created via casting. + + *

Example in EXASOL syntax: CAST(TIMESTAMP '2000-01-28 12:30:01.001' AS TIMESTAMP WITH LOCAL TIME ZONE)

+ */ + TIMESTAMP_UTC, + + /** + * The literal for double values. + *

Example in EXASOL syntax: 100.23

+ */ + DOUBLE, + + /** + * The literal for exact numeric values. + *

Example in EXASOL syntax: 123

+ */ + EXACTNUMERIC, + + /** + * The literal for string values. + *

Example in EXASOL syntax: 'alice'

+ */ + STRING, + + /** + * The literal for interval values. + *

Example in EXASOL syntax: INTERVAL '5' MONTH

+ */ + INTERVAL + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/MainCapability.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/MainCapability.java new file mode 100644 index 000000000..2c72ad1d6 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/MainCapability.java @@ -0,0 +1,103 @@ +package com.exasol.adapter.capabilities; + +/** + * This is an enumeration of the main capabilities supported by the EXASOL Virtual Schema Framework. + */ +public enum MainCapability { + + /** + * Support projections, i.e. request only a subset of all columns of the + * table. + * + *

Example: SELECT a FROM t;

+ */ + SELECTLIST_PROJECTION, + + /** + * Support expressions in the select list; + * Additional capabilities are required depending on the expression. + * + *

Example: SELECT a+1, ucase(b) FROM t;

+ */ + SELECTLIST_EXPRESSIONS, + + /** + * Support filter expressions. Additional capabilities are required + * depending on the expression. + * + *

Example: SELECT * FROM t WHERE a>2;

+ */ + FILTER_EXPRESSIONS, + + /** + * Support aggregations with a single group. This happens when an + * aggregation-function is used without a group by clause. + * + *

Example: SELECT min(a) FROM t;

+ */ + AGGREGATE_SINGLE_GROUP, + + /** + * Support aggregations with a group by clause consisting of columns. + * + *

Example: SELECT a, b, min(c) FROM t GROUP BY a, b;

+ */ + AGGREGATE_GROUP_BY_COLUMN, + + /** + * Support aggregations with a group by clause that contains expressions. + * + *

Example: SELECT a+1, min(b) FROM t GROUP BY a+1;

+ */ + AGGREGATE_GROUP_BY_EXPRESSION, + + /** + * Support aggregations with a group by clause with multiple group by + * columns or expressions. + * Note that you might additionally require AGGREGATE_GROUP_BY_EXPRESSION or + * AGGREGATE_GROUP_BY_COLUMN. + * + *

Example: SELECT a, b, min(c) FROM t GROUP BY a, b;

+ */ + AGGREGATE_GROUP_BY_TUPLE, + + /** + * Support aggregations with a having clause. Additional capabilities might + * be required depending on the expression. + * + *

Example: SELECT a, min(b) FROM t GROUP BY a HAVING min(b) > 10;

+ */ + AGGREGATE_HAVING, + + /** + * Support to order the result by columns. + * + *

Attention: This includes the capability to specify NULLS FIRST/LAST and ASC/DESC.

+ * + *

Example: SELECT a, b FROM t ORDER BY a, b DESC NULLS FIRST;

+ */ + ORDER_BY_COLUMN, + + /** + * Support to order the result by expressions. + * + *

Attention: This includes the capability to specify NULLS FIRST/LAST and ASC/DESC.

+ * + *

Example: SELECT a FROM t ORDER BY abs(a) ASC NULLS LAST;

+ */ + ORDER_BY_EXPRESSION, + + /** + * Support to limit the number of result rows. Often used together with ordering. + * + *

Example: SELECT * FROM t LIMIT 100;

+ */ + LIMIT, + + /** + * Support to limit the number of result rows including an offset. + * + *

Example: SELECT * FROM t LIMIT 100 OFFSET 10;

+ */ + LIMIT_WITH_OFFSET +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/PredicateCapability.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/PredicateCapability.java new file mode 100644 index 000000000..7e0863f4e --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/PredicateCapability.java @@ -0,0 +1,38 @@ +package com.exasol.adapter.capabilities; + +import com.exasol.adapter.sql.Predicate; + +/** + * List of all Predicates (scalar functions returning bool) supported by EXASOL. + */ +public enum PredicateCapability { + AND, + OR, + NOT, + EQUAL, + NOTEQUAL, + LESS, + LESSEQUAL, + LIKE, + LIKE_ESCAPE (Predicate.LIKE), // the LIKE predicate with the optional escape character defined + REGEXP_LIKE, + BETWEEN, + IN_CONSTLIST, + IS_NULL, + IS_NOT_NULL; + + private Predicate predicate; + + PredicateCapability() { + this.predicate = Predicate.valueOf(name()); + } + + PredicateCapability(Predicate predicate) { + this.predicate = predicate; + } + + public Predicate getPredicate() { + return predicate; + } +} + diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/ScalarFunctionCapability.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/ScalarFunctionCapability.java new file mode 100644 index 000000000..62ad5ebfa --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/capabilities/ScalarFunctionCapability.java @@ -0,0 +1,221 @@ +package com.exasol.adapter.capabilities; + +import com.exasol.adapter.sql.ScalarFunction; + +/** + * List of all scalar functions supported by EXASOL. Note that predicates are handled separately in {@link PredicateCapability}. + */ +public enum ScalarFunctionCapability { + + // Standard Arithmetic Operators + ADD, + SUB, + MULT, + FLOAT_DIV, + + // Unary prefix operators + NEG, + + // Numeric functions + ABS, + ACOS, + ASIN, + ATAN, + ATAN2, + CEIL, + COS, + COSH, + COT, + DEGREES, + DIV, + EXP, + FLOOR, + GREATEST, + LEAST, + LN, + LOG, + MOD, + POWER, + RADIANS, + RAND, + ROUND, + SIGN, + SIN, + SINH, + SQRT, + TAN, + TANH, + TRUNC, + // Aliases: + // LOG10 and LOG2 via LN + + // String Functions + ASCII, + BIT_LENGTH, + CHR, + COLOGNE_PHONETIC, + CONCAT, + DUMP, + EDIT_DISTANCE, + INSERT, + INSTR, + LENGTH, + LOCATE, + LOWER, + LPAD, + LTRIM, + OCTET_LENGTH, + REGEXP_INSTR, + REGEXP_REPLACE, + REGEXP_SUBSTR, + REPEAT, + REPLACE, + REVERSE, + RIGHT, + RPAD, + RTRIM, + SOUNDEX, + SPACE, + SUBSTR, + TRANSLATE, + TRIM, + UNICODE, + UNICODECHR, + UPPER, + // Aliases: + // POSITION via INSTR or LOCATE + // LCASE via LCASE + // MID via SUBSTR + // LEFT via SUBSTR? + // UCASE via UPPER + // CHARACTER_LENGTH via LENGTH? 
+ + // Date/Time Functions + ADD_DAYS, + ADD_HOURS, + ADD_MINUTES, + ADD_MONTHS, + ADD_SECONDS, + ADD_WEEKS, + ADD_YEARS, + CONVERT_TZ, + CURRENT_DATE, + CURRENT_TIMESTAMP, + DATE_TRUNC, + DAY, + DAYS_BETWEEN, + DBTIMEZONE, + EXTRACT, + HOURS_BETWEEN, + LOCALTIMESTAMP, + MINUTE, + MINUTES_BETWEEN, + MONTH, + MONTHS_BETWEEN, + NUMTODSINTERVAL, + NUMTOYMINTERVAL, + POSIX_TIME, + SECOND, + SECONDS_BETWEEN, + SESSIONTIMEZONE, + SYSDATE, + SYSTIMESTAMP, + WEEK, + YEAR, + YEARS_BETWEEN, + + // Geospatial + // - Point Functions + ST_X, + ST_Y, + // - (Multi-)LineString Functions + ST_ENDPOINT, + ST_ISCLOSED, + ST_ISRING, + ST_LENGTH, + ST_NUMPOINTS, + ST_POINTN, + ST_STARTPOINT, + // - (Multi-)Polygon Functions + ST_AREA, + ST_EXTERIORRING, + ST_INTERIORRINGN, + ST_NUMINTERIORRINGS, + // - GeometryCollection Functions + ST_GEOMETRYN, + ST_NUMGEOMETRIES, + // - General Functions + ST_BOUNDARY, + ST_BUFFER, + ST_CENTROID, + ST_CONTAINS, + ST_CONVEXHULL, + ST_CROSSES, + ST_DIFFERENCE, + ST_DIMENSION, + ST_DISJOINT, + ST_DISTANCE, + ST_ENVELOPE, + ST_EQUALS, + ST_FORCE2D, + ST_GEOMETRYTYPE, + ST_INTERSECTION, + ST_INTERSECTS, + ST_ISEMPTY, + ST_ISSIMPLE, + ST_OVERLAPS, + ST_SETSRID, + ST_SYMDIFFERENCE, + ST_TOUCHES, + ST_TRANSFORM, + ST_UNION, + ST_WITHIN, + + // Conversion functions + CAST, // Has alias CONVERT + IS_NUMBER, + IS_BOOLEAN, + IS_DATE, + IS_DSINTERVAL, + IS_YMINTERVAL, + IS_TIMESTAMP, + TO_CHAR, + TO_DATE, + TO_DSINTERVAL, + TO_YMINTERVAL, + TO_NUMBER, + TO_TIMESTAMP, + + // Bitwise functions + BIT_AND, + BIT_CHECK, + BIT_NOT, + BIT_OR, + BIT_SET, + BIT_TO_NUM, + BIT_XOR, + + // Other functions + CASE, + CURRENT_SCHEMA, + CURRENT_SESSION, + CURRENT_STATEMENT, + CURRENT_USER, + HASH_MD5, + HASH_SHA, + HASH_SHA1, + HASH_TIGER, + NULLIFZERO, + SYS_GUID, + ZEROIFNULL + + // Skipped: Connect-By Functions + ; + + public ScalarFunction getFunction() { + // The set of capabilites and functions should be completely equal. 
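+        // Hence every capability constant resolves to the ScalarFunction constant of the same name.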
diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/RequestJsonParser.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/RequestJsonParser.java
new file mode 100644
index 000000000..bf492f5cc
--- /dev/null
+++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/RequestJsonParser.java
@@ -0,0 +1,565 @@
+package com.exasol.adapter.json;
+
+import com.exasol.adapter.metadata.ColumnMetadata;
+import com.exasol.adapter.metadata.DataType;
+import com.exasol.adapter.metadata.DataType.ExaCharset;
+import com.exasol.adapter.metadata.DataType.IntervalType;
+import com.exasol.adapter.metadata.SchemaMetadataInfo;
+import com.exasol.adapter.metadata.TableMetadata;
+import com.exasol.adapter.request.*;
+import com.exasol.adapter.sql.*;
+import com.exasol.utils.JsonHelper;
+
+import javax.json.JsonArray;
+import javax.json.JsonObject;
+import javax.json.JsonString;
+import javax.json.JsonValue;
+import javax.json.JsonValue.ValueType;
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class RequestJsonParser {
+
+    private List<TableMetadata> involvedTablesMetadata;
+
+    public AdapterRequest parseRequest(String json) throws Exception {
+        JsonObject root = JsonHelper.getJsonObject(json);
+        String requestType = root.getString("type", "");
+        SchemaMetadataInfo meta = parseMetadataInfo(root);
+        if (requestType.equals("createVirtualSchema")) {
+            return new CreateVirtualSchemaRequest(meta);
+        } else if (requestType.equals("dropVirtualSchema")) {
+            return new DropVirtualSchemaRequest(meta);
+        } else if (requestType.equals("refresh")) {
+            if (root.containsKey("requestedTables")) {
+                List<String> tables = new ArrayList<>();
+                for (JsonString table : root.getJsonArray("requestedTables").getValuesAs(JsonString.class)) {
+                    tables.add(table.getString());
+                }
+                return new RefreshRequest(meta, tables);
+            } else {
+                return new RefreshRequest(meta);
+            }
+        } else if (requestType.equals("setProperties")) {
+            Map<String, String> properties = new HashMap<>();
+            assert(root.containsKey("properties") && root.get("properties").getValueType() == ValueType.OBJECT);
+            for (Map.Entry<String, JsonValue> entry : root.getJsonObject("properties").entrySet()) {
+                String key = entry.getKey();
+                // Null values represent properties which are deleted by the user (might also have never existed actually)
+                if (root.getJsonObject("properties").isNull(key)) {
+                    properties.put(key.toUpperCase(), null);
+                } else {
+                    properties.put(key.toUpperCase(), root.getJsonObject("properties").getString(key));
+                }
+            }
+            return new SetPropertiesRequest(meta, properties);
+        } else if (requestType.equals("getCapabilities")) {
+            return new GetCapabilitiesRequest(meta);
+        } else if (requestType.equals("pushdown")) {
+            assert(root.containsKey("involvedTables") && root.get("involvedTables").getValueType() == ValueType.ARRAY);
+            involvedTablesMetadata = parseInvolvedTableMetadata(root.getJsonArray("involvedTables"));
+            JsonObject pushdownExp;
+            if (root.containsKey("pushdownRequest")) {
+                pushdownExp = root.getJsonObject("pushdownRequest");
+            } else {
+                pushdownExp = root.getJsonObject("pushdownInquiry");    // This is outdated, remove when old versions are no longer used
+            }
+            SqlNode select = parseExpression(pushdownExp);
+            assert(select.getType() == SqlNodeType.SELECT);
+            return new PushdownRequest(meta, (SqlStatementSelect)select, involvedTablesMetadata);
+        } else {
+            throw new RuntimeException("Request type not supported: " + requestType);
+        }
+    }
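+
+    // Usage sketch (illustrative only, assumes a javax.json implementation on the classpath):
+    //
+    //   String json = "{\"type\": \"dropVirtualSchema\","
+    //       + " \"schemaMetadataInfo\": {\"name\": \"VS_TEST\", \"properties\": {}}}";
+    //   AdapterRequest request = new RequestJsonParser().parseRequest(json);
+    //   // request is a DropVirtualSchemaRequest carrying the parsed SchemaMetadataInfo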
+
+    private List<TableMetadata> parseInvolvedTableMetadata(JsonArray involvedTables) {
+        List<TableMetadata> tables = new ArrayList<>();
+        for (JsonObject table : involvedTables.getValuesAs(JsonObject.class)) {
+            String tableName = table.getString("name", "");
+            String tableAdapterNotes = readAdapterNotes(table);
+            String tableComment = table.getString("comment", "");
+            List<ColumnMetadata> columns = new ArrayList<>();
+            for (JsonObject column : table.getJsonArray("columns").getValuesAs(JsonObject.class)) {
+                columns.add(parseColumnMetadata(column));
+            }
+            tables.add(new TableMetadata(tableName, tableAdapterNotes, columns, tableComment));
+        }
+        return tables;
+    }
+
+    private ColumnMetadata parseColumnMetadata(JsonObject column) {
+        String columnName = column.getString("name");
+        String adapterNotes = readAdapterNotes(column);
+        String comment = column.getString("comment", "");
+        String defaultValue = column.getString("default", "");
+        boolean isNullable = true;
+        if (column.containsKey("isNullable")) {
+            isNullable = column.getBoolean("isNullable");
+        }
+        // Default to false: the serializer only emits "isIdentity" when it is true,
+        // so an absent field means the column is not an identity column.
+        boolean isIdentity = false;
+        if (column.containsKey("isIdentity")) {
+            isIdentity = column.getBoolean("isIdentity");
+        }
+        JsonObject dataType = column.getJsonObject("dataType");
+        DataType type = getDataType(dataType);
+        return new ColumnMetadata(columnName, adapterNotes, type, isNullable, isIdentity, defaultValue, comment);
+    }
+
+    private DataType getDataType(JsonObject dataType) {
+        String typeName = dataType.getString("type").toUpperCase();
+        DataType type = null;
+        if (typeName.equals("DECIMAL")) {
+            type = DataType.createDecimal(dataType.getInt("precision"), dataType.getInt("scale"));
+        } else if (typeName.equals("DOUBLE")) {
+            type = DataType.createDouble();
+        } else if (typeName.equals("VARCHAR")) {
+            String charSet = dataType.getString("characterSet", "UTF8");
+            type = DataType.createVarChar(dataType.getInt("size"), charSetFromString(charSet));
+        } else if (typeName.equals("CHAR")) {
+            String charSet = dataType.getString("characterSet", "UTF8");
+            type = DataType.createChar(dataType.getInt("size"), charSetFromString(charSet));
+        } else if (typeName.equals("BOOLEAN")) {
+            type = DataType.createBool();
+        } else if (typeName.equals("DATE")) {
+            type = DataType.createDate();
+        } else if (typeName.equals("TIMESTAMP")) {
+            boolean withLocalTimezone = dataType.getBoolean("withLocalTimeZone", false);
+            type = DataType.createTimestamp(withLocalTimezone);
+        } else if (typeName.equals("INTERVAL")) {
+            int precision = dataType.getInt("precision", 2);    // has a default in EXASOL
+            IntervalType intervalType = intervalTypeFromString(dataType.getString("fromTo"));
+            if (intervalType == IntervalType.DAY_TO_SECOND) {
+                int fraction = dataType.getInt("fraction", 3);    // has a default in EXASOL
+                type = DataType.createIntervalDaySecond(precision, fraction);
+            } else {
+                assert(intervalType == IntervalType.YEAR_TO_MONTH);
+                type = DataType.createIntervalYearMonth(precision);
+            }
+        } else if (typeName.equals("GEOMETRY")) {
+            int srid = dataType.getInt("srid");
+            type = DataType.createGeometry(srid);
+        } else {
+            throw new RuntimeException("parseColumnMetadata: Unsupported Data Type: " + typeName);
+        }
+        return type;
+    }
+
+    private static IntervalType intervalTypeFromString(String intervalType) {
+        if (intervalType.equals("DAY TO SECONDS")) {
+            return IntervalType.DAY_TO_SECOND;
+        } else if (intervalType.equals("YEAR TO MONTH")) {
+            return IntervalType.YEAR_TO_MONTH;
+        } else {
+            throw new RuntimeException("Unexpected interval type: " + intervalType);
+        }
+    }
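+
+    // For orientation (illustrative values): a descriptor like
+    //   {"type": "DECIMAL", "precision": 18, "scale": 2}
+    // is mapped by getDataType above to DataType.createDecimal(18, 2),
+    // whose toString() renders as "DECIMAL(18, 2)".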
+
+    private static ExaCharset charSetFromString(String charset) {
+        if (charset.equals("UTF8")) {
+            return ExaCharset.UTF8;
+        } else if (charset.equals("ASCII")) {
+            return ExaCharset.ASCII;
+        } else {
+            throw new RuntimeException("Unexpected Charset: " + charset);
+        }
+    }
+
+    private SqlStatementSelect parseSelect(JsonObject select) {
+        // FROM clause
+        SqlNode table = parseExpression(select.getJsonObject("from"));
+        assert(table.getType() == SqlNodeType.TABLE);
+        // SELECT list
+        SqlSelectList selectList = parseSelectList(select.getJsonArray("selectList"));
+        // GROUP BY
+//        boolean hasAggregation = false;
+//        boolean hasGroupBy = false;
+//        if (select.containsKey("aggregationType")) {
+//            hasAggregation = true;
+//            hasGroupBy = select.getString("aggregationType").equals("group_by");
+//        }
+        SqlExpressionList groupByClause = parseGroupBy(select.getJsonArray("groupBy"));
+        // WHERE clause
+        SqlNode whereClause = null;
+        if (select.containsKey("filter")) {
+            whereClause = parseExpression(select.getJsonObject("filter"));
+        }
+        SqlNode having = null;
+        if (select.containsKey("having")) {
+            having = parseExpression(select.getJsonObject("having"));
+        }
+        SqlOrderBy orderBy = null;
+        if (select.containsKey("orderBy")) {
+            orderBy = parseOrderBy(select.getJsonArray("orderBy"));
+        }
+        SqlLimit limit = null;
+        if (select.containsKey("limit")) {
+            limit = parseLimit(select.getJsonObject("limit"));
+        }
+        return new SqlStatementSelect((SqlTable)table, selectList, whereClause, groupByClause, having, orderBy, limit);
+    }
+
+    private List<SqlNode> parseExpressionList(JsonArray array) {
+        assert(array != null);
+        List<SqlNode> sqlNodes = new ArrayList<>();
+        for (JsonObject expr : array.getValuesAs(JsonObject.class)) {
+            SqlNode node = parseExpression(expr);
+            sqlNodes.add(node);
+        }
+        return sqlNodes;
+    }
+
+    private SqlGroupBy parseGroupBy(JsonArray groupBy) {
+        if (groupBy == null) {
+            return null;
+        }
+        List<SqlNode> groupByElements = parseExpressionList(groupBy);
+        return new SqlGroupBy(groupByElements);
+    }
+
+    private SqlSelectList parseSelectList(JsonArray selectList) {
+        if (selectList == null) {
+            // this is like SELECT *
+            return new SqlSelectList();
+        }
+        List<SqlNode> selectListElements = parseExpressionList(selectList);
+        return new SqlSelectList(selectListElements);
+    }
+
+    private SqlOrderBy parseOrderBy(JsonArray orderByList) {
+        List<SqlNode> orderByExpressions = new ArrayList<>();
+        List<Boolean> isAsc = new ArrayList<>();
+        List<Boolean> nullsLast = new ArrayList<>();
+        for (int i = 0; i < orderByList.size(); ++i) {
+            JsonObject orderElem = orderByList.getJsonObject(i);
+            orderByExpressions.add(parseExpression(orderElem.getJsonObject("expression")));
+            isAsc.add(orderElem.getBoolean("isAscending", true));
+            nullsLast.add(orderElem.getBoolean("nullsLast", true));
+        }
+        return new SqlOrderBy(orderByExpressions, isAsc, nullsLast);
+    }
+
+    private SqlLimit parseLimit(JsonObject limit) {
+        int numElements = limit.getInt("numElements");
+        int offset = limit.getInt("offset", 0);
+        return new SqlLimit(numElements, offset);
+    }
+
+    private SchemaMetadataInfo parseMetadataInfo(JsonObject root) {
+        JsonObject meta = root.getJsonObject("schemaMetadataInfo");
+        if (meta == null) {
+            return null;
+        }
+        String schemaName = meta.getString("name", "");
+        String schemaAdapterNotes = readAdapterNotes(meta);
+        Map<String, String> properties = new HashMap<>();
+        if (meta.getJsonObject("properties") != null) {
+            for (Map.Entry<String, JsonValue> entry : meta.getJsonObject("properties").entrySet()) {
+                String key = entry.getKey();
+                properties.put(key.toUpperCase(), meta.getJsonObject("properties").getString(key));
+            }
+        }
+        return new SchemaMetadataInfo(schemaName, schemaAdapterNotes, properties);
+    }
+
+    private static String readAdapterNotes(JsonObject root) {
+        if (root.containsKey("adapterNotes")) {
+            JsonValue notes = root.get("adapterNotes");
+            if (notes.getValueType() == ValueType.STRING) {
+                // Return unquoted string
+                return ((JsonString)notes).getString();
+            } else {
+                return notes.toString();
+            }
+        }
+        return "";
+    }
+
+    private SqlNode parseExpression(JsonObject exp) {
+        String typeName = exp.getString("type", "");
+        SqlNodeType type = fromTypeName(typeName);
+        switch (type) {
+        case SELECT:
+            return parseSelect(exp);
+        case TABLE: {
+            String tableName = exp.getString("name");
+            TableMetadata tableMetadata = findInvolvedTableMetadata(tableName);
+            if (exp.containsKey("alias")) {
+                String
tableAlias = exp.getString("alias"); + return new SqlTable(tableName, tableAlias, tableMetadata); + } else { + return new SqlTable(tableName, tableMetadata); + } + } + case COLUMN: { + int columnId = exp.getInt("columnNr"); + String columnName = exp.getString("name"); + String tableName = exp.getString("tableName"); + ColumnMetadata columnMetadata = findColumnMetadata(tableName, columnName); + return new SqlColumn(columnId, columnMetadata); + } + case LITERAL_NULL: { + return new SqlLiteralNull(); + } + case LITERAL_BOOL: { + boolean boolVal = exp.getBoolean("value"); + return new SqlLiteralBool(boolVal); + } + case LITERAL_DATE: { + String date = exp.getString("value"); + return new SqlLiteralDate(date); + } + case LITERAL_TIMESTAMP: { + String timestamp = exp.getString("value"); + return new SqlLiteralTimestamp(timestamp); + } + case LITERAL_TIMESTAMPUTC: { + String timestampUtc = exp.getString("value"); + return new SqlLiteralTimestampUtc(timestampUtc); + } + case LITERAL_DOUBLE: { + String doubleString = exp.getString("value"); + return new SqlLiteralDouble(Double.parseDouble(doubleString)); + } + case LITERAL_EXACTNUMERIC: { + BigDecimal exactVal = new BigDecimal( exp.getString("value")); + return new SqlLiteralExactnumeric(exactVal); + } + case LITERAL_STRING: { + String stringVal = exp.getString("value"); + return new SqlLiteralString(stringVal); + } + case LITERAL_INTERVAL: { + String intervalVal = exp.getString("value"); + DataType intervalType = getDataType(exp.getJsonObject("dataType")); + return new SqlLiteralInterval(intervalVal, intervalType); + } + case PREDICATE_AND: { + List andedPredicates = new ArrayList<>(); + for (JsonObject pred : exp.getJsonArray("expressions").getValuesAs(JsonObject.class)) { + andedPredicates.add(parseExpression(pred)); + } + return new SqlPredicateAnd(andedPredicates); + } + case PREDICATE_OR: { + List orPredicates = new ArrayList<>(); + for (JsonObject pred : exp.getJsonArray("expressions").getValuesAs(JsonObject.class)) { + orPredicates.add(parseExpression(pred)); + } + return new SqlPredicateOr(orPredicates); + } + case PREDICATE_NOT: { + SqlNode notExp = parseExpression(exp.getJsonObject("expression")); + return new SqlPredicateNot(notExp); + } + case PREDICATE_EQUAL: { + SqlNode equalLeft = parseExpression(exp.getJsonObject("left")); + SqlNode equalRight = parseExpression(exp.getJsonObject("right")); + return new SqlPredicateEqual(equalLeft, equalRight); + } + case PREDICATE_NOTEQUAL: { + SqlNode notEqualLeft = parseExpression(exp.getJsonObject("left")); + SqlNode notEqualRight = parseExpression(exp.getJsonObject("right")); + return new SqlPredicateNotEqual(notEqualLeft, notEqualRight); + } + case PREDICATE_LESS: { + SqlNode lessLeft = parseExpression(exp.getJsonObject("left")); + SqlNode lessRight = parseExpression(exp.getJsonObject("right")); + return new SqlPredicateLess(lessLeft, lessRight); + } + case PREDICATE_LESSEQUAL: { + SqlNode lessEqLeft = parseExpression(exp.getJsonObject("left")); + SqlNode lessEqRight = parseExpression(exp.getJsonObject("right")); + return new SqlPredicateLessEqual(lessEqLeft, lessEqRight); + } + case PREDICATE_LIKE: { + SqlNode likeLeft = parseExpression(exp.getJsonObject("expression")); + SqlNode likePattern = parseExpression(exp.getJsonObject("pattern")); + if (exp.containsKey("escapeChar")) { + SqlNode escapeChar = parseExpression(exp.getJsonObject("escapeChar")); + return new SqlPredicateLike(likeLeft, likePattern, escapeChar); + } + return new SqlPredicateLike(likeLeft, likePattern); + } + case 
PREDICATE_LIKE_REGEXP: { + SqlNode likeRegexpLeft = parseExpression(exp.getJsonObject("expression")); + SqlNode likeRegexpPattern = parseExpression(exp.getJsonObject("pattern")); + return new SqlPredicateLikeRegexp(likeRegexpLeft, likeRegexpPattern); + } + case PREDICATE_BETWEEN: { + SqlNode betweenExp = parseExpression(exp.getJsonObject("expression")); + SqlNode betweenLeft = parseExpression(exp.getJsonObject("left")); + SqlNode betweenRight = parseExpression(exp.getJsonObject("right")); + return new SqlPredicateBetween(betweenExp, betweenLeft, betweenRight); + } + case PREDICATE_IN_CONSTLIST: { + SqlNode inExp = parseExpression(exp.getJsonObject("expression")); + List inArguments = new ArrayList<>(); + for (JsonObject pred : exp.getJsonArray("arguments").getValuesAs(JsonObject.class)) { + inArguments.add(parseExpression(pred)); + } + return new SqlPredicateInConstList(inExp, inArguments); + } + case PREDICATE_IS_NULL: { + SqlNode isnullExp = parseExpression(exp.getJsonObject("expression")); + return new SqlPredicateIsNull(isnullExp); + } + case PREDICATE_IS_NOT_NULL: { + SqlNode isNotnullExp = parseExpression(exp.getJsonObject("expression")); + return new SqlPredicateIsNotNull(isNotnullExp); + } + case FUNCTION_SCALAR: { + String functionName = exp.getString("name"); + boolean hasVariableInputArgs = false; + int numArgs; + if (exp.containsKey("variableInputArgs")) { + hasVariableInputArgs = exp.getBoolean("variableInputArgs"); + } + List arguments = new ArrayList<>(); + for (JsonObject argument : exp.getJsonArray("arguments").getValuesAs(JsonObject.class)) { + arguments.add(parseExpression(argument)); + } + if (!hasVariableInputArgs) { + numArgs = exp.getInt("numArgs"); // this is the expected number of arguments for this scalar function + assert (numArgs == arguments.size()); + } + boolean isInfix = false; + if (exp.containsKey("infix")) { + isInfix = exp.getBoolean("infix"); + } + boolean isPrefix = false; + if (exp.containsKey("prefix")) { + assert (!isPrefix); + isPrefix = exp.getBoolean("prefix"); + } + return new SqlFunctionScalar(fromScalarFunctionName(functionName), arguments, isInfix, isPrefix); + } + case FUNCTION_SCALAR_EXTRACT: { + SqlNode extractExpr = parseExpression(exp.getJsonObject("expression")); + String dateTime = exp.getString("dateTime"); + return new SqlFunctionScalarExtract(dateTime, extractExpr); + } + case FUNCTION_SCALAR_CASE: { + List caseArguments = new ArrayList<>(); + List caseResults = new ArrayList<>(); + SqlNode caseBasis = null; + if (exp.containsKey("arguments")) { + for (JsonObject argument : exp.getJsonArray("arguments").getValuesAs(JsonObject.class)) { + caseArguments.add(parseExpression(argument)); + } + } + if (exp.containsKey("results")) { + for (JsonObject argument : exp.getJsonArray("results").getValuesAs(JsonObject.class)) { + caseResults.add(parseExpression(argument)); + } + } + if (exp.containsKey("basis")) { + caseBasis = parseExpression(exp.getJsonObject("basis")); + } + return new SqlFunctionScalarCase(caseArguments, caseResults, caseBasis); + } + case FUNCTION_SCALAR_CAST: { + DataType castDataType = getDataType(exp.getJsonObject("dataType")); + SqlNode castExpr = parseExpression(exp.getJsonObject("expression")); + + return new SqlFunctionScalarCast(castDataType, castExpr); + } + case FUNCTION_AGGREGATE: { + String setFunctionName = exp.getString("name"); + List setArguments = new ArrayList<>(); + boolean distinct = false; + if (exp.containsKey("distinct")) { + distinct = exp.getBoolean("distinct"); + } + if 
(exp.containsKey("arguments")) { + for (JsonObject argument : exp.getJsonArray("arguments").getValuesAs(JsonObject.class)) { + setArguments.add(parseExpression(argument)); + } + } + return new SqlFunctionAggregate(fromAggregationFunctionName(setFunctionName), setArguments, distinct); + } + case FUNCTION_AGGREGATE_GROUP_CONCAT: { + String functionName = exp.getString("name"); + List groupConcatOrderByExpressions = new ArrayList<>(); + List groupConcatAscending = new ArrayList<>(); + List groupConcatNullsFirst = new ArrayList<>(); + + boolean distinctGroupConcat = false; + if (exp.containsKey("distinct")) { + distinctGroupConcat = exp.getBoolean("distinct"); + } + SqlNode concatExpression = parseExpression(exp.getJsonObject("concatExpression")); + if (exp.containsKey("arguments")) { + for (JsonObject argument : exp.getJsonArray("arguments").getValuesAs(JsonObject.class)) { + groupConcatOrderByExpressions.add(parseExpression(argument)); + } + } + if (exp.containsKey("ascendingOrder")) { + JsonArray array = exp.getJsonArray("ascendingOrder"); + for (int i = 0; i < array.size(); i++) { + groupConcatAscending.add(array.getBoolean(i)); + } + } + if (exp.containsKey("nullsFirstOrder")) { + JsonArray array = exp.getJsonArray("nullsFirstOrder"); + for (int i = 0; i < array.size(); i++) { + groupConcatNullsFirst.add(array.getBoolean(i)); + } + } + String separator = null; + if (exp.containsKey("separator")) { + separator = exp.getString("separator"); + } + return new SqlFunctionAggregateGroupConcat(fromAggregationFunctionName(functionName), + concatExpression, groupConcatOrderByExpressions, distinctGroupConcat, groupConcatAscending, + groupConcatNullsFirst, separator); + } + default: + throw new RuntimeException("Unknown node type: " + typeName); + } + } + + /** + * Mapping from scalar function name (as in json api) to enum + */ + private static ScalarFunction fromScalarFunctionName(String functionName) { + return Enum.valueOf(ScalarFunction.class, functionName.toUpperCase()); + } + + /** + * Mapping from aggregate function name (as in json api) to enum + */ + private static AggregateFunction fromAggregationFunctionName(String functionName) { + return Enum.valueOf(AggregateFunction.class, functionName.toUpperCase()); + } + + /** + * Mapping from type name (as in json api) to enum + */ + private static SqlNodeType fromTypeName(String typeName) { + return Enum.valueOf(SqlNodeType.class, typeName.toUpperCase()); + } + + private TableMetadata findInvolvedTableMetadata(String tableName) { + assert(involvedTablesMetadata != null); + for (TableMetadata tableMetadata : involvedTablesMetadata) { + if (tableMetadata.getName().equals(tableName)) { + return tableMetadata; + } + } + throw new RuntimeException("Could not find table metadata for involved table " + tableName + ". All involved tables: " + involvedTablesMetadata.toString()); + } + + private ColumnMetadata findColumnMetadata(String tableName, String columnName) { + TableMetadata tableMetadata = findInvolvedTableMetadata(tableName); + for (ColumnMetadata columnMetadata : tableMetadata.getColumns()) { + if (columnMetadata.getName().equals(columnName)) { + return columnMetadata; + } + } + throw new RuntimeException("Could not find column metadata for involved table " + tableName + " and column + " + columnName + ". 
All involved tables: " + involvedTablesMetadata.toString()); + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/ResponseJsonSerializer.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/ResponseJsonSerializer.java new file mode 100644 index 000000000..7646a8636 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/ResponseJsonSerializer.java @@ -0,0 +1,90 @@ +package com.exasol.adapter.json; + +import javax.json.Json; +import javax.json.JsonArrayBuilder; +import javax.json.JsonBuilderFactory; +import javax.json.JsonObject; +import javax.json.JsonObjectBuilder; + +import com.exasol.adapter.capabilities.*; +import com.exasol.adapter.metadata.SchemaMetadata; +import com.exasol.utils.JsonHelper; + +public class ResponseJsonSerializer { + + public static final String SCALAR_FUNCTION_PREFIX = "FN_"; + public static final String PREDICATE_PREFIX = "FN_PRED_"; + public static final String AGGREGATE_FUNCTION_PREFIX = "FN_AGG_"; + public static final String LITERAL_PREFIX = "LITERAL_"; + + public static String makeCreateVirtualSchemaResponse(SchemaMetadata remoteMeta) { + JsonObject res = Json.createObjectBuilder() + .add("type", "createVirtualSchema") + .add("schemaMetadata", SchemaMetadataSerializer.serialize(remoteMeta)) + .build(); + return res.toString(); + } + + public static String makeDropVirtualSchemaResponse() { + JsonBuilderFactory factory = JsonHelper.getBuilderFactory(); + JsonObject res = factory.createObjectBuilder() + .add("type", "dropVirtualSchema") + .build(); + return res.toString(); + } + + public static String makeGetCapabilitiesResponse(Capabilities capabilities) { + JsonBuilderFactory factory = JsonHelper.getBuilderFactory(); + JsonObjectBuilder builder = factory.createObjectBuilder() + .add("type", "getCapabilities"); + JsonArrayBuilder arrayBuilder = factory.createArrayBuilder(); + for (MainCapability capability : capabilities.getMainCapabilities()) { + String capName = capability.name(); + arrayBuilder.add(capName); + } + for (ScalarFunctionCapability function : capabilities.getScalarFunctionCapabilities()) { + String capName = SCALAR_FUNCTION_PREFIX + function.name(); + arrayBuilder.add(capName); + } + for (PredicateCapability predicate : capabilities.getPredicateCapabilities()) { + String capName = PREDICATE_PREFIX + predicate.name(); + arrayBuilder.add(capName); + } + for (AggregateFunctionCapability function : capabilities.getAggregateFunctionCapabilities()) { + String capName = AGGREGATE_FUNCTION_PREFIX + function.name(); + arrayBuilder.add(capName); + } + for (LiteralCapability literal : capabilities.getLiteralCapabilities()) { + String capName = LITERAL_PREFIX + literal.name(); + arrayBuilder.add(capName); + } + builder.add("capabilities", arrayBuilder); + return builder.build().toString(); + } + + public static String makePushdownResponse(String pushdownSql) { + JsonBuilderFactory factory = JsonHelper.getBuilderFactory(); + JsonObject res = factory.createObjectBuilder() + .add("type", "pushdown") + .add("sql", pushdownSql) + .build(); + return res.toString(); + } + + public static String makeSetPropertiesResponse(SchemaMetadata remoteMeta) { + JsonObjectBuilder builder = Json.createObjectBuilder(); + builder.add("type", "setProperties"); + if (remoteMeta != null) { + builder.add("schemaMetadata", SchemaMetadataSerializer.serialize(remoteMeta)); + } + return builder.build().toString(); + } + + public static String makeRefreshResponse(SchemaMetadata remoteMeta) 
{ + JsonObjectBuilder builder = Json.createObjectBuilder(); + builder.add("type", "refresh"); + builder.add("schemaMetadata", SchemaMetadataSerializer.serialize(remoteMeta)); + return builder.build().toString(); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/SchemaMetadataSerializer.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/SchemaMetadataSerializer.java new file mode 100644 index 000000000..a6690f862 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/SchemaMetadataSerializer.java @@ -0,0 +1,61 @@ +package com.exasol.adapter.json; + +import javax.json.Json; +import javax.json.JsonArrayBuilder; +import javax.json.JsonBuilderFactory; +import javax.json.JsonObjectBuilder; + +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.SchemaMetadata; +import com.exasol.adapter.metadata.TableMetadata; +import com.exasol.utils.JsonHelper; + +public class SchemaMetadataSerializer { + + public static JsonObjectBuilder serialize(SchemaMetadata schema) { + JsonBuilderFactory factory = JsonHelper.getBuilderFactory(); + JsonObjectBuilder root = factory.createObjectBuilder(); + JsonArrayBuilder tablesBuilder = factory.createArrayBuilder(); + for (TableMetadata table : schema.getTables()) { + tablesBuilder.add(serializeTableMetadata(table, factory.createObjectBuilder())); + } + root.add("tables", tablesBuilder); + root.add("adapterNotes", schema.getAdapterNotes()); + return root; + } + + private static JsonObjectBuilder serializeTableMetadata(TableMetadata table, JsonObjectBuilder tableBuilder) { + tableBuilder.add("type", "table"); + tableBuilder.add("name", table.getName()); + JsonArrayBuilder columnsBuilder = Json.createArrayBuilder(); + for (ColumnMetadata column : table.getColumns()) { + columnsBuilder.add(serializeColumnMetadata(column, Json.createObjectBuilder())); + } + tableBuilder.add("adapterNotes", table.getAdapterNotes()); + if (table.hasComment()) { + tableBuilder.add("comment", table.getComment()); + } + tableBuilder.add("columns", columnsBuilder); + return tableBuilder; + } + + private static JsonObjectBuilder serializeColumnMetadata(ColumnMetadata column, JsonObjectBuilder columnBuilder) { + columnBuilder.add("name", column.getName()); + columnBuilder.add("adapterNotes", column.getAdapterNotes()); + columnBuilder.add("dataType", SqlDataTypeJsonSerializer.serialize(column.getType())); + if (!column.isNullable()) { + columnBuilder.add("isNullable", false); + } + if (column.isIdentity()) { + columnBuilder.add("isIdentity", true); + } + if (column.hasDefault()) { + columnBuilder.add("default", column.getDefaultValue()); + } + if (column.hasComment()) { + columnBuilder.add("comment", column.getComment()); + } + return columnBuilder; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/SqlDataTypeJsonSerializer.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/SqlDataTypeJsonSerializer.java new file mode 100644 index 000000000..1f8865f7b --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/json/SqlDataTypeJsonSerializer.java @@ -0,0 +1,107 @@ +package com.exasol.adapter.json; + +import javax.json.Json; +import javax.json.JsonObjectBuilder; + +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.metadata.DataType.ExaCharset; +import com.exasol.adapter.metadata.DataType.ExaDataType; +import 
com.exasol.adapter.metadata.DataType.IntervalType; + +public class SqlDataTypeJsonSerializer { + + public static JsonObjectBuilder serialize(DataType dataType) { + JsonObjectBuilder root = Json.createObjectBuilder() + .add("type", exaTypeAsString(dataType.getExaDataType())); + + switch (dataType.getExaDataType()) { + case UNSUPPORTED: + throw new RuntimeException("Unsupported Data Type, should never happen"); + case DECIMAL: + root.add("precision", dataType.getPrecision()); + root.add("scale", dataType.getScale()); + break; + case DOUBLE: + break; + case VARCHAR: + root.add("size", dataType.getSize()); + root.add("characterSet", exaCharSetAsString(dataType.getCharset())); + break; + case CHAR: + root.add("size", dataType.getSize()); + root.add("characterSet", exaCharSetAsString(dataType.getCharset())); + break; + case DATE: + break; + case TIMESTAMP: + root.add("withLocalTimeZone", dataType.isWithLocalTimezone()); + break; + case BOOLEAN: + break; + case GEOMETRY: + root.add("srid", dataType.getGeometrySrid()); + break; + case INTERVAL: + root.add("fromTo", intervalTypeAsString(dataType.getIntervalType())); + root.add("precision", dataType.getPrecision()); + if (dataType.getIntervalType() == IntervalType.DAY_TO_SECOND) { + root.add("fraction", dataType.getIntervalFraction()); + } + break; + default: + throw new RuntimeException("Unexpected Data Type: " + dataType.getExaDataType()); + } + + return root; + } + + private static String exaTypeAsString(ExaDataType dataType) { + switch (dataType) { + case UNSUPPORTED: + return "unsupported"; + case DECIMAL: + return "decimal"; + case DOUBLE: + return "double"; + case VARCHAR: + return "varchar"; + case CHAR: + return "char"; + case DATE: + return "date"; + case TIMESTAMP: + return "timestamp"; + case BOOLEAN: + return "boolean"; + case GEOMETRY: + return "geometry"; + case INTERVAL: + return "interval"; + default: + return "unknown"; + } + } + + private static String exaCharSetAsString(ExaCharset charset) { + switch (charset) { + case UTF8: + return "UTF8"; + case ASCII: + return "ASCII"; + default: + throw new RuntimeException("Unexpected Charset: " + charset); + } + } + + private static String intervalTypeAsString(IntervalType intervalType) { + switch (intervalType) { + case DAY_TO_SECOND: + return "DAY TO SECONDS"; + case YEAR_TO_MONTH: + return "YEAR TO MONTH"; + default: + throw new RuntimeException("Unexpected IntervalType: " + intervalType); + } + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/ColumnMetadata.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/ColumnMetadata.java new file mode 100644 index 000000000..a79a47172 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/ColumnMetadata.java @@ -0,0 +1,64 @@ +package com.exasol.adapter.metadata; + + +/** + * Represents the metadata of an EXASOL table column. + */ +public class ColumnMetadata { + + private String name; + private String adapterNotes; + private DataType type; + private boolean isNullable; // has a third state "unknown" in jdbc, which has to be mapped by the adapter to yes or no + private boolean isIdentity; // auto increment in the sense of jdbc. However, jdbc has a third state ("empty", i.e. could not determine) which is mapped to false here. + private String defaultValue; // special case: "NULL" means SQL's NULL. + private String comment; // comes from "REMARKS" field in jdbc + // Primary Keys?!?! 
=> Index, Cardinality = num rows + + public ColumnMetadata(String name, String adapterNotes, DataType type, boolean nullable, boolean isIdentity, String defaultValue, String comment) { + this.name = name; + this.adapterNotes = adapterNotes; + this.type = type; + this.isNullable = nullable; + this.isIdentity = isIdentity; + this.defaultValue = defaultValue; + this.comment = comment; + } + + public String getName() { + return name; + } + + public String getAdapterNotes() { + return adapterNotes; + } + + public DataType getType() { + return type; + } + + public boolean isNullable() { + return isNullable; + } + + public boolean isIdentity() { + return isIdentity; + } + + public boolean hasDefault() { + return !defaultValue.isEmpty(); + } + + public String getDefaultValue() { + return defaultValue; + } + + public boolean hasComment() { + return !comment.isEmpty(); + } + + public String getComment() { + return comment; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/DataType.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/DataType.java new file mode 100644 index 000000000..529153744 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/DataType.java @@ -0,0 +1,227 @@ +package com.exasol.adapter.metadata; + + +/** + * Represents an EXASOL datatype. + */ +public class DataType { + + public static int maxExasolCharSize = 2000; + public static int maxExasolVarcharSize = 2000000; + public static int maxExasolDecimalPrecision = 36; + + private ExaDataType exaDataType; + private int precision; + private int scale; + private int size; + private ExaCharset charset; + private boolean withLocalTimezone; + private int geometrySrid; + private IntervalType intervalType; + private int intervalFraction; + + public enum ExaDataType { + UNSUPPORTED, + DECIMAL, + DOUBLE, + VARCHAR, + CHAR, + DATE, + TIMESTAMP, + BOOLEAN, + GEOMETRY, + INTERVAL + } + + public enum ExaCharset { + UTF8, + ASCII + } + + public enum IntervalType { + DAY_TO_SECOND, + YEAR_TO_MONTH + } + + private DataType() { + } + + public static DataType createVarChar(int size, ExaCharset charset) { + DataType type = new DataType(); + type.exaDataType = ExaDataType.VARCHAR; + type.size = size; + type.charset = charset; + return type; + } + + public static DataType createChar(int size, ExaCharset charset) { + DataType type = new DataType(); + type.exaDataType = ExaDataType.CHAR; + type.size = size; + type.charset = charset; + return type; + } + + public static DataType createDecimal(int precision, int scale) { + DataType type = new DataType(); + type.exaDataType = ExaDataType.DECIMAL; + type.precision = precision; + type.scale = scale; + return type; + } + + public static DataType createDouble() { + DataType type = new DataType(); + type.exaDataType = ExaDataType.DOUBLE; + return type; + } + + public static DataType createDate() { + DataType type = new DataType(); + type.exaDataType = ExaDataType.DATE; + return type; + } + + public static DataType createTimestamp(boolean withLocalTimezone) { + DataType type = new DataType(); + type.exaDataType = ExaDataType.TIMESTAMP; + type.withLocalTimezone = withLocalTimezone; + return type; + } + + public static DataType createBool() { + DataType type = new DataType(); + type.exaDataType = ExaDataType.BOOLEAN; + return type; + } + + public static DataType createGeometry(int srid) { + DataType type = new DataType(); + type.exaDataType = ExaDataType.GEOMETRY; + type.geometrySrid = srid; + 
return type; + } + + public static DataType createIntervalDaySecond(int precision, int fraction) { + DataType type = new DataType(); + type.exaDataType = ExaDataType.INTERVAL; + type.intervalType = IntervalType.DAY_TO_SECOND; + type.precision = precision; + type.intervalFraction = fraction; + return type; + } + + public static DataType createIntervalYearMonth(int precision) { + DataType type = new DataType(); + type.exaDataType = ExaDataType.INTERVAL; + type.intervalType = IntervalType.YEAR_TO_MONTH; + type.precision = precision; + return type; + } + + public ExaDataType getExaDataType() { + return exaDataType; + } + + public int getPrecision() { + return precision; + } + + public int getScale() { + return scale; + } + + public int getSize() { + return size; + } + + public ExaCharset getCharset() { + return charset; + } + + public boolean isWithLocalTimezone() { + return withLocalTimezone; + } + + public int getGeometrySrid() { + return geometrySrid; + } + + public IntervalType getIntervalType() { + return intervalType; + } + + public int getIntervalFraction() { + return intervalFraction; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + switch (exaDataType) { + case UNSUPPORTED: + builder.append("UNSUPPORTED"); + break; + case DECIMAL: + builder.append("DECIMAL("); + builder.append(precision); + builder.append(", "); + builder.append(scale); + builder.append(")"); + break; + case DOUBLE: + builder.append("DOUBLE"); + break; + case VARCHAR: + builder.append("VARCHAR("); + builder.append(size); + builder.append(") "); + builder.append(charset.toString()); + break; + case CHAR: + builder.append("CHAR("); + builder.append(size); + builder.append(") "); + builder.append(charset.toString()); + break; + case DATE: + builder.append("DATE"); + break; + case TIMESTAMP: + builder.append("TIMESTAMP"); + if (withLocalTimezone) { + builder.append(" WITH LOCAL TIME ZONE"); + } + break; + case BOOLEAN: + builder.append("BOOLEAN"); + break; + case GEOMETRY: + builder.append("GEOMETRY"); + builder.append("("); + builder.append(geometrySrid); + builder.append(")"); + break; + case INTERVAL: + builder.append("INTERVAL "); + if (intervalType == IntervalType.YEAR_TO_MONTH) { + builder.append("YEAR"); + builder.append(" ("); + builder.append(precision); + builder.append(")"); + builder.append(" TO MONTH"); + } else { + builder.append("DAY"); + builder.append(" ("); + builder.append(precision); + builder.append(")"); + builder.append(" TO SECOND"); + builder.append(" ("); + builder.append(intervalFraction); + builder.append(")"); + } + break; + } + return builder.toString(); + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/SchemaMetadata.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/SchemaMetadata.java new file mode 100644 index 000000000..20e8b07e8 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/SchemaMetadata.java @@ -0,0 +1,28 @@ +package com.exasol.adapter.metadata; + +import java.util.List; + +/** + * Represents the metadata of an EXASOL Virtual Schema, including tables and columns. + * These metadata are are returned by the Adapter when creating a virtual schema or + * when the adapter updates the metadata (during refresh or set property). 
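+ *
+ * <p>Construction sketch (values are illustrative only):</p>
+ * <pre>
+ * List&lt;TableMetadata&gt; tables = new ArrayList&lt;&gt;();
+ * tables.add(itemsTable); // a TableMetadata instance built beforehand
+ * SchemaMetadata metadata = new SchemaMetadata("", tables);
+ * </pre>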
+ */ +public class SchemaMetadata { + + private String adapterNotes; + private List tables; + + public SchemaMetadata(String adapterNotes, List tables) { + this.adapterNotes = adapterNotes; + this.tables = tables; + } + + public String getAdapterNotes() { + return adapterNotes; + } + + public List getTables() { + return tables; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/SchemaMetadataInfo.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/SchemaMetadataInfo.java new file mode 100644 index 000000000..a97dc47cc --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/SchemaMetadataInfo.java @@ -0,0 +1,47 @@ +package com.exasol.adapter.metadata; + +import java.util.Map; + +import com.google.common.base.MoreObjects; + +/** + * Represents the metadata of an EXASOL Virtual Schema which are sent with each request. + * The metadata are just "for information" for the adapter. These metadata don't contain the table metadata. + */ +public class SchemaMetadataInfo { + + private String schemaName; + private String adapterNotes; + private Map properties; + + public SchemaMetadataInfo(String schemaName, String adapterNotes, Map properties) { + this.schemaName = schemaName; + this.adapterNotes = adapterNotes; + this.properties = properties; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("schemaName", schemaName) + .add("adapterNotes", adapterNotes) + .add("properties", properties) + .toString(); + } + + public String getSchemaName() { + return schemaName; + } + + public String getAdapterNotes() { + return adapterNotes; + } + + /** + * \note Keys are case-insensitive and stored upper case + */ + public Map getProperties() { + return properties; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/TableMetadata.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/TableMetadata.java new file mode 100644 index 000000000..8a4ac0cd9 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/metadata/TableMetadata.java @@ -0,0 +1,57 @@ +package com.exasol.adapter.metadata; + +import java.util.List; + +import com.google.common.base.MoreObjects; + +/** + * Represents the metadata of an EXASOL table. 
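+ *
+ * <p>Construction sketch (values are illustrative only; the constructor rejects tables without columns):</p>
+ * <pre>
+ * ColumnMetadata id = new ColumnMetadata("ID", "", DataType.createDecimal(18, 0),
+ *         false, false, "", "");
+ * TableMetadata items = new TableMetadata("ITEMS", "", Collections.singletonList(id), "");
+ * </pre>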
+ */ +public class TableMetadata { + + private String name; + private String adapterNotes; + private List columns; + private String comment; + + public TableMetadata(String name, String adapterNotes, List columns, String comment) { + this.name = name; + this.adapterNotes = adapterNotes; + this.columns = columns; + this.comment = comment; + if (this.columns.isEmpty()) { + throw new RuntimeException("Error: Adapter tried to return a table without columns: " + this.name); + } + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", name) + .add("adapterNotes", adapterNotes) + .add("columns", columns) + .add("comment", comment) + .toString(); + } + + public String getName() { + return name; + } + + public String getAdapterNotes() { + return adapterNotes; + } + + public List getColumns() { + return columns; + } + + public boolean hasComment() { + return !comment.isEmpty(); + } + + public String getComment() { + return comment; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/AdapterRequest.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/AdapterRequest.java new file mode 100644 index 000000000..0fbffe36d --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/AdapterRequest.java @@ -0,0 +1,31 @@ +package com.exasol.adapter.request; + +import com.exasol.adapter.metadata.SchemaMetadataInfo; + +public class AdapterRequest { + + public enum AdapterRequestType { + CREATE_VIRTUAL_SCHEMA, + DROP_VIRTUAL_SCHEMA, + REFRESH, + SET_PROPERTIES, + GET_CAPABILITIES, + PUSHDOWN + } + + private SchemaMetadataInfo schemaMetadataInfo; + private AdapterRequestType type; + + AdapterRequest(SchemaMetadataInfo schemaMetadataInfo, AdapterRequestType type) { + this.schemaMetadataInfo = schemaMetadataInfo; + this.type = type; + } + + public SchemaMetadataInfo getSchemaMetadataInfo() { + return schemaMetadataInfo; + } + + public AdapterRequestType getType() { + return type; + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/CreateVirtualSchemaRequest.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/CreateVirtualSchemaRequest.java new file mode 100644 index 000000000..2f9746596 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/CreateVirtualSchemaRequest.java @@ -0,0 +1,10 @@ +package com.exasol.adapter.request; + +import com.exasol.adapter.metadata.SchemaMetadataInfo; + +public class CreateVirtualSchemaRequest extends AdapterRequest { + + public CreateVirtualSchemaRequest(SchemaMetadataInfo schemaMetadataInfo) { + super(schemaMetadataInfo, AdapterRequestType.CREATE_VIRTUAL_SCHEMA); + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/DropVirtualSchemaRequest.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/DropVirtualSchemaRequest.java new file mode 100644 index 000000000..8edf202f6 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/DropVirtualSchemaRequest.java @@ -0,0 +1,10 @@ +package com.exasol.adapter.request; + +import com.exasol.adapter.metadata.SchemaMetadataInfo; + +public class DropVirtualSchemaRequest extends AdapterRequest { + + public DropVirtualSchemaRequest(SchemaMetadataInfo schemaMetadataInfo) { + super(schemaMetadataInfo, AdapterRequestType.DROP_VIRTUAL_SCHEMA); + } +} diff --git 
a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/GetCapabilitiesRequest.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/GetCapabilitiesRequest.java new file mode 100644 index 000000000..a7f538614 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/GetCapabilitiesRequest.java @@ -0,0 +1,10 @@ +package com.exasol.adapter.request; + +import com.exasol.adapter.metadata.SchemaMetadataInfo; + +public class GetCapabilitiesRequest extends AdapterRequest { + + public GetCapabilitiesRequest(SchemaMetadataInfo schemaMetadataInfo) { + super(schemaMetadataInfo, AdapterRequestType.GET_CAPABILITIES); + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/PushdownRequest.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/PushdownRequest.java new file mode 100644 index 000000000..f8fe781db --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/PushdownRequest.java @@ -0,0 +1,27 @@ +package com.exasol.adapter.request; + +import java.util.List; + +import com.exasol.adapter.metadata.SchemaMetadataInfo; +import com.exasol.adapter.metadata.TableMetadata; +import com.exasol.adapter.sql.SqlStatement; + +public class PushdownRequest extends AdapterRequest { + + private SqlStatement select; + private List involvedTablesMetadata; + + public PushdownRequest(SchemaMetadataInfo schemaMetadataInfo, SqlStatement select, List involvedTablesMetadata) { + super(schemaMetadataInfo, AdapterRequestType.PUSHDOWN); + this.select = select; + this.involvedTablesMetadata = involvedTablesMetadata; + } + + public SqlStatement getSelect() { + return select; + } + + public List getInvolvedTablesMetadata() { + return involvedTablesMetadata; + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/RefreshRequest.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/RefreshRequest.java new file mode 100644 index 000000000..1b57a7219 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/RefreshRequest.java @@ -0,0 +1,32 @@ +package com.exasol.adapter.request; + +import java.util.List; + +import com.exasol.adapter.metadata.SchemaMetadataInfo; + +public class RefreshRequest extends AdapterRequest { + + private boolean isRefreshForTables; + private List tables; + + public RefreshRequest(SchemaMetadataInfo schemaMetadataInfo) { + super(schemaMetadataInfo, AdapterRequestType.REFRESH); + isRefreshForTables = false; + } + + public RefreshRequest(SchemaMetadataInfo schemaMetadataInfo, List tables) { + super(schemaMetadataInfo, AdapterRequestType.REFRESH); + assert(tables != null && !tables.isEmpty()); + isRefreshForTables = true; + this.tables = tables; + } + + public List getTables() { + return tables; + } + + public boolean isRefreshForTables() { + return isRefreshForTables; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/SetPropertiesRequest.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/SetPropertiesRequest.java new file mode 100644 index 000000000..26ad513ad --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/request/SetPropertiesRequest.java @@ -0,0 +1,19 @@ +package com.exasol.adapter.request; + +import java.util.Map; + +import com.exasol.adapter.metadata.SchemaMetadataInfo; + +public class 
SetPropertiesRequest extends AdapterRequest { + + private Map properties; + + public SetPropertiesRequest(SchemaMetadataInfo schemaMetadataInfo, Map properties) { + super(schemaMetadataInfo, AdapterRequestType.SET_PROPERTIES); + this.properties = properties; + } + + public Map getProperties() { + return properties; + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/AggregateFunction.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/AggregateFunction.java new file mode 100644 index 000000000..5313db911 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/AggregateFunction.java @@ -0,0 +1,50 @@ +package com.exasol.adapter.sql; + +/** + * List of all aggregation functions supported by EXASOL + */ +public enum AggregateFunction { + COUNT, + SUM, + MIN, + MAX, + AVG, + + MEDIAN, + + FIRST_VALUE, + LAST_VALUE, + + STDDEV, + STDDEV_POP, + STDDEV_SAMP, + + VARIANCE, + VAR_POP, + VAR_SAMP, + + GROUP_CONCAT(false), + + APPROXIMATE_COUNT_DISTINCT, + + GEO_INTERSECTION_AGGREGATE, + GEO_UNION_AGGREGATE; + + private boolean isSimple; + + /** + * True if the function is simple, i.e. is handled by {@link SqlFunctionAggregate}, and false if it has it's own implementation. + */ + public boolean isSimple() { + return isSimple; + } + + AggregateFunction() { + this.isSimple = true; + } + + AggregateFunction(boolean isSimple) { + this.isSimple = isSimple; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/Predicate.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/Predicate.java new file mode 100644 index 000000000..0087d9304 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/Predicate.java @@ -0,0 +1,20 @@ +package com.exasol.adapter.sql; + +/** + * List of all predicates (scalar functions returning bool) supported by EXASOL. + */ +public enum Predicate { + AND, + OR, + NOT, + EQUAL, + NOTEQUAL, + LESS, + LESSEQUAL, + LIKE, + REGEXP_LIKE, + BETWEEN, + IN_CONSTLIST, + IS_NULL, + IS_NOT_NULL +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/ScalarFunction.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/ScalarFunction.java new file mode 100644 index 000000000..4c905ddff --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/ScalarFunction.java @@ -0,0 +1,212 @@ +package com.exasol.adapter.sql; + +/** + * List of all scalar functions supported by EXASOL. Note that predicates are defined separately in {@link Predicate}. 
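+ *
+ * <p>Functions declared with {@code (false)} below (EXTRACT, CAST, CASE) are not "simple":
+ * they are represented by dedicated nodes such as {@link SqlFunctionScalarCast} instead of a
+ * generic {@link SqlFunctionScalar}.</p>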
+ */ +public enum ScalarFunction { + + // Standard Arithmetic Operators + ADD, + SUB, + MULT, + FLOAT_DIV, + + // Unary prefix operators + NEG, + + // Numeric + ABS, + ACOS, + ASIN, + ATAN, + ATAN2, + CEIL, + COS, + COSH, + COT, + DEGREES, + DIV, + EXP, + FLOOR, + GREATEST, + LEAST, + LN, + LOG, + MOD, + POWER, + RADIANS, + RAND, + ROUND, + SIGN, + SIN, + SINH, + SQRT, + TAN, + TANH, + TRUNC, + + // String Functions + ASCII, + BIT_LENGTH, + CHR, + COLOGNE_PHONETIC, + CONCAT, + DUMP, + EDIT_DISTANCE, + INSERT, + INSTR, + LENGTH, + LOCATE, + LOWER, + LPAD, + LTRIM, + OCTET_LENGTH, + REGEXP_INSTR, + REGEXP_REPLACE, + REGEXP_SUBSTR, + REPEAT, + REPLACE, + REVERSE, + RIGHT, + RPAD, + RTRIM, + SOUNDEX, + SPACE, + SUBSTR, + TRANSLATE, + TRIM, + UNICODE, + UNICODECHR, + UPPER, + + // Date/Time Functions + ADD_DAYS, + ADD_HOURS, + ADD_MINUTES, + ADD_MONTHS, + ADD_SECONDS, + ADD_WEEKS, + ADD_YEARS, + CONVERT_TZ, + CURRENT_DATE, + CURRENT_TIMESTAMP, + DATE_TRUNC, + DAY, + DAYS_BETWEEN, + DBTIMEZONE, + EXTRACT(false), + HOURS_BETWEEN, + LOCALTIMESTAMP, + MINUTE, + MINUTES_BETWEEN, + MONTH, + MONTHS_BETWEEN, + NUMTODSINTERVAL, + NUMTOYMINTERVAL, + POSIX_TIME, + SECOND, + SECONDS_BETWEEN, + SESSIONTIMEZONE, + SYSDATE, + SYSTIMESTAMP, + WEEK, + YEAR, + YEARS_BETWEEN, + + // Geospatial + ST_X, + ST_Y, + ST_ENDPOINT, + ST_ISCLOSED, + ST_ISRING, + ST_LENGTH, + ST_NUMPOINTS, + ST_POINTN, + ST_STARTPOINT, + ST_AREA, + ST_EXTERIORRING, + ST_INTERIORRINGN, + ST_NUMINTERIORRINGS, + ST_GEOMETRYN, + ST_NUMGEOMETRIES, + ST_BOUNDARY, + ST_BUFFER, + ST_CENTROID, + ST_CONTAINS, + ST_CONVEXHULL, + ST_CROSSES, + ST_DIFFERENCE, + ST_DIMENSION, + ST_DISJOINT, + ST_DISTANCE, + ST_ENVELOPE, + ST_EQUALS, + ST_FORCE2D, + ST_GEOMETRYTYPE, + ST_INTERSECTION, + ST_INTERSECTS, + ST_ISEMPTY, + ST_ISSIMPLE, + ST_OVERLAPS, + ST_SETSRID, + ST_SYMDIFFERENCE, + ST_TOUCHES, + ST_TRANSFORM, + ST_UNION, + ST_WITHIN, + + // Conversion functions + CAST(false), + IS_NUMBER, + IS_BOOLEAN, + IS_DATE, + IS_DSINTERVAL, + IS_YMINTERVAL, + IS_TIMESTAMP, + TO_CHAR, + TO_DATE, + TO_DSINTERVAL, + TO_YMINTERVAL, + TO_NUMBER, + TO_TIMESTAMP, + + // Bitwise functions + BIT_AND, + BIT_CHECK, + BIT_NOT, + BIT_OR, + BIT_SET, + BIT_TO_NUM, + BIT_XOR, + + // Other Functions + CASE(false), + CURRENT_SCHEMA, + CURRENT_SESSION, + CURRENT_STATEMENT, + CURRENT_USER, + HASH_MD5, + HASH_SHA, + HASH_SHA1, + HASH_TIGER, + NULLIFZERO, + SYS_GUID, + ZEROIFNULL; + + private boolean isSimple; + + /** + * True if the function is simple, i.e. is handled by {@link SqlFunctionScalar}, and false if it has it's own implementation. 
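+ *
+ * <p>For example, {@code ScalarFunction.UPPER.isSimple()} is {@code true}, while
+ * {@code ScalarFunction.CAST.isSimple()} is {@code false}, because CAST is parsed into a
+ * {@link SqlFunctionScalarCast} node.</p>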
+ */ + public boolean isSimple() { + return isSimple; + } + + ScalarFunction() { + this.isSimple = true; + } + + ScalarFunction(boolean isSimple) { + this.isSimple = isSimple; + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlColumn.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlColumn.java new file mode 100644 index 000000000..2cd5c18bd --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlColumn.java @@ -0,0 +1,42 @@ +package com.exasol.adapter.sql; + +import com.exasol.adapter.metadata.ColumnMetadata; + +public class SqlColumn extends SqlNode { + + private int id; + private ColumnMetadata metadata; + + public SqlColumn(int id, ColumnMetadata metadata) { + this.id = id; + this.metadata = metadata; + } + + public int getId() { + return id; + } + + public ColumnMetadata getMetadata() { + return metadata; + } + + public String getName() { + return metadata.getName(); + } + + @Override + public String toSimpleSql() { + return "\"" + metadata.getName().replace("\"", "\"\"") + "\""; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.COLUMN; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlExpressionList.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlExpressionList.java new file mode 100644 index 000000000..e86231381 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlExpressionList.java @@ -0,0 +1,16 @@ +package com.exasol.adapter.sql; + +import java.util.List; + +public abstract class SqlExpressionList extends SqlNode { + + public SqlExpressionList() { + + } + + public SqlExpressionList(List expressions) { + super(expressions); + assert(expressions != null); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionAggregate.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionAggregate.java new file mode 100644 index 000000000..7e32c39b3 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionAggregate.java @@ -0,0 +1,64 @@ +package com.exasol.adapter.sql; + +import com.google.common.base.Joiner; + +import java.util.ArrayList; +import java.util.List; + +/** + * A simple aggregate function with a name and zero or more arguments. Distinct is also allowed. + * + *
<p>Aggregate functions that are more complex, like GroupConcat, are defined in separate classes.</p>
+ */ +public class SqlFunctionAggregate extends SqlNode { + + private AggregateFunction function; + private boolean distinct; + + public SqlFunctionAggregate(AggregateFunction function, List arguments, boolean distinct) { + setSons(arguments); + this.function = function; + this.distinct = distinct; + } + + public AggregateFunction getFunction() { + return function; + } + + public String getFunctionName() { + return function.name(); + } + + public boolean hasDistinct() { + return distinct; + } + + @Override + public String toSimpleSql() { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : getSons()) { + argumentsSql.add(node.toSimpleSql()); + } + if (argumentsSql.size() == 0) { + assert(getFunctionName().equalsIgnoreCase("count")); + argumentsSql.add("*"); + } + String distinctSql = ""; + if (distinct) { + distinctSql = "DISTINCT "; + } + return getFunctionName() + "(" + distinctSql + Joiner.on(", ").join(argumentsSql) + ")"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.FUNCTION_AGGREGATE; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionAggregateGroupConcat.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionAggregateGroupConcat.java new file mode 100644 index 000000000..1eaccd286 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionAggregateGroupConcat.java @@ -0,0 +1,123 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.List; + + +public class SqlFunctionAggregateGroupConcat extends SqlNode { + + private AggregateFunction function; + private boolean distinct; + private SqlNode concatExpression; + private String separator; + // Lists describing the ORDER BY expressions (must all have same length) + private List orderByExpressions; + private List ascendingOrder; + private List nullsFirstOrder; + + public SqlFunctionAggregateGroupConcat(AggregateFunction function, SqlNode concatExpression, + List orderByExpressions, boolean distinct, + List ascendingOrder, List nullsFirstOrder, + String separator) { + assert(orderByExpressions.size() == ascendingOrder.size()); + assert(ascendingOrder.size() == nullsFirstOrder.size()); + List sons = new ArrayList<>(); + sons.add(concatExpression); + sons.addAll(orderByExpressions); + setSons(sons); + this.function = function; + this.distinct = distinct; + this.concatExpression = concatExpression; + this.orderByExpressions = orderByExpressions; + this.ascendingOrder = ascendingOrder; + this.nullsFirstOrder = nullsFirstOrder; + this.separator = separator; + } + + public AggregateFunction getFunction() { + return function; + } + + public SqlNode getConcatExpression() { + return concatExpression; + } + + public List getAscendingOrderList() { + return ascendingOrder; + } + + public List getNullsFirstOrderList() { + return nullsFirstOrder; + } + + public String getFunctionName() { + return function.name(); + } + + public String getSeparator() { + return separator; + } + + public List getOrderByExpressions() { + return orderByExpressions; + } + + public boolean hasDistinct() { + return distinct; + } + + @Override + public String toSimpleSql() { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : getSons()) { + argumentsSql.add(node.toSimpleSql()); + } + if (argumentsSql.size() == 0) { + assert(getFunctionName().equalsIgnoreCase("count")); + 
argumentsSql.add("*"); + } + String distinctSql = ""; + if (distinct) { + distinctSql = "DISTINCT "; + } + StringBuilder builder = new StringBuilder(); + builder.append(getFunctionName()); + builder.append("("); + builder.append(distinctSql); + builder.append(concatExpression.toSimpleSql()); + if (getOrderByExpressions().size() > 0) { + builder.append(" ORDER BY "); + for (int i = 0; i < getSons().size(); i++) { + if (i > 0) { + builder.append(", "); + } + builder.append(getSon(i).toSimpleSql()); + if (!getAscendingOrderList().get(i)) { + builder.append(" DESC"); + } + if (getNullsFirstOrderList().get(i)) { + builder.append(" NULLS FIRST"); + } + } + } + if (separator != null) { + builder.append(" SEPARATOR "); + builder.append("'"); + builder.append(separator); + builder.append("'"); + } + builder.append(")"); + return builder.toString(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.FUNCTION_AGGREGATE; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalar.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalar.java new file mode 100644 index 000000000..329aafc2d --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalar.java @@ -0,0 +1,91 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.google.common.base.Joiner; + +/** + * A simple scalar function with a name and zero or more arguments. + * + *

Scalar functions that are more complex, like CASE or CAST, are defined in separate classes. + *

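To make the infix and prefix special cases of the class defined just below concrete, here is a small sketch. It is not part of the commit; it assumes the surrounding `com.exasol.adapter.sql` package and that the `ScalarFunction` enum (not shown in this hunk) defines an `ADD` constant, as the alias table in `toSimpleSql()` suggests:

```java
package com.exasol.adapter.sql;

import com.google.common.collect.ImmutableList;
import java.math.BigDecimal;

// Hypothetical usage sketch, not part of this commit.
public class ScalarRenderingDemo {
    public static void main(String[] args) {
        SqlNode left = new SqlLiteralExactnumeric(BigDecimal.ONE);
        SqlNode right = new SqlLiteralExactnumeric(BigDecimal.TEN);
        // ADD is registered as an infix alias, so it renders as "+"
        // between its arguments instead of ADD(1, 10).
        SqlFunctionScalar add = new SqlFunctionScalar(
                ScalarFunction.ADD, ImmutableList.of(left, right), true, false);
        System.out.println(add.toSimpleSql()); // prints: (1 + 10)
    }
}
```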
+ */ +public class SqlFunctionScalar extends SqlNode { + + private ScalarFunction function; + private boolean isInfix; + private boolean isPrefix; + + public SqlFunctionScalar(ScalarFunction function, List arguments, boolean isInfix, boolean isPrefix) { + setSons(arguments); + this.function = function; + this.isInfix = isInfix; + this.isPrefix = isPrefix; + } + + public ScalarFunction getFunction() { + return function; + } + + public String getFunctionName() { + return function.name(); + } + + public int getNumArgs() { + return getSons().size(); + } + + public boolean isInfix() { + return isInfix; + } + + public boolean isPrefix() { + return isPrefix; + } + + + @Override + public String toSimpleSql() { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : getSons()) { + argumentsSql.add(node.toSimpleSql()); + } + if (isInfix) { + assert(argumentsSql.size()==2); + Map functionAliases = new HashMap(); + functionAliases.put("ADD", "+"); + functionAliases.put("SUB", "-"); + functionAliases.put("MULT", "*"); + functionAliases.put("FLOAT_DIV", "/"); + String realFunctionName = getFunctionName(); + if (functionAliases.containsKey(getFunctionName())) { + realFunctionName = functionAliases.get(getFunctionName()); + } + return "(" + argumentsSql.get(0) + " " + realFunctionName + " " + argumentsSql.get(1) + ")"; + } else if (isPrefix) { + assert(argumentsSql.size()==1); + Map functionAliases = new HashMap(); + functionAliases.put("NEG", "-"); + String realFunctionName = getFunctionName(); + if (functionAliases.containsKey(getFunctionName())) { + realFunctionName = functionAliases.get(getFunctionName()); + } + return "(" + realFunctionName + argumentsSql.get(0) + ")"; + } + return getFunctionName() + "(" + Joiner.on(", ").join(argumentsSql) + ")"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.FUNCTION_SCALAR; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarCase.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarCase.java new file mode 100644 index 000000000..7707891f2 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarCase.java @@ -0,0 +1,51 @@ +package com.exasol.adapter.sql; + + +import java.util.List; + +public class SqlFunctionScalarCase extends SqlNode { + private List arguments; + private List results; + private SqlNode basis; + + public SqlFunctionScalarCase(List arguments, List results, SqlNode basis) { + this.arguments = arguments; + this.results = results; + this.basis = basis; + } + + public List getArguments() { + return arguments; + } + + public List getResults() { + return results; + } + + public SqlNode getBasis() { + return basis; + } + + @Override + public String toSimpleSql() { + return "CASE"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.FUNCTION_SCALAR_CASE; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + + public String getFunctionName() { + return "CASE"; + } + + public ScalarFunction getFunction() { + return ScalarFunction.CASE; + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarCast.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarCast.java new file mode 100644 index 000000000..ea02def7d --- /dev/null +++
b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarCast.java @@ -0,0 +1,46 @@ +package com.exasol.adapter.sql; + +import com.exasol.adapter.metadata.DataType; + +public class SqlFunctionScalarCast extends SqlNode { + private DataType dataType; + private SqlNode expression; + + public SqlFunctionScalarCast(DataType dataType, SqlNode expression) { + this.expression = expression; + this.dataType = dataType; + } + + public DataType getDataType() { + return dataType; + } + + + public SqlNode getExpression() { + return expression; + } + + @Override + public String toSimpleSql() { + return "CAST (" + expression.toSimpleSql() + " AS " + getDataType().toString() + ")"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.FUNCTION_SCALAR_CAST; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + + public String getFunctionName() { + return "CAST"; + } + + public ScalarFunction getFunction() { + return ScalarFunction.CAST; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarExtract.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarExtract.java new file mode 100644 index 000000000..b567d5fbd --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlFunctionScalarExtract.java @@ -0,0 +1,44 @@ +package com.exasol.adapter.sql; + +public class SqlFunctionScalarExtract extends SqlNode { + private String dateTime; + private SqlNode expression; + + public SqlFunctionScalarExtract(String dateTime, SqlNode expression) { + this.expression = expression; + this.dateTime = dateTime; + } + + public String getDateTime() { + return dateTime; + } + + + public SqlNode getExpression() { + return expression; + } + + @Override + public String toSimpleSql() { + return "EXTRACT (" + dateTime + " FROM " + expression.toSimpleSql() + ")"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.FUNCTION_SCALAR_EXTRACT; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + + public String getFunctionName() { + return "EXTRACT"; + } + + public ScalarFunction getFunction() { + return ScalarFunction.EXTRACT; + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlGroupBy.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlGroupBy.java new file mode 100644 index 000000000..d5f6e65f6 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlGroupBy.java @@ -0,0 +1,40 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.List; + +import com.google.common.base.Joiner; + +public class SqlGroupBy extends SqlExpressionList { + + public SqlGroupBy() { + + } + + public SqlGroupBy(List groupByList) { + super(groupByList); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.GROUP_BY; + } + + @Override + public String toSimpleSql() { + if (getSons().isEmpty()) { + return "*"; + } + List selectElement = new ArrayList<>(); + for (SqlNode node : getSons()) { + selectElement.add(node.toSimpleSql()); + } + return Joiner.on(", ").join(selectElement); + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLimit.java 
b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLimit.java new file mode 100644 index 000000000..365a4bad3 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLimit.java @@ -0,0 +1,59 @@ +package com.exasol.adapter.sql; + + +public class SqlLimit extends SqlNode { + + int limit; + int offset; + + public SqlLimit(int limit) { + this(limit, 0); + } + + public SqlLimit(int limit, int offset) { + this.limit = limit; + this.offset = offset; + assert(offset >= 0); + assert(limit >= 0); + } + + public int getLimit() { + return limit; + } + + public int getOffset() { + return offset; + } + + public boolean hasOffset() { + return offset != 0; + } + + public void setLimit(int limit) { + this.limit = limit; + } + + public void setOffset(int offset) { + this.offset = offset; + } + + @Override + public String toSimpleSql() { + String offsetSql = ""; + if (offset != 0) { + offsetSql = " OFFSET " + offset; + } + return "LIMIT " + limit + offsetSql; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LIMIT; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralBool.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralBool.java new file mode 100644 index 000000000..6a9bba217 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralBool.java @@ -0,0 +1,35 @@ +package com.exasol.adapter.sql; + + +public class SqlLiteralBool extends SqlNode { + + private boolean value; + + public SqlLiteralBool(boolean value) { + this.value = value; + } + + public boolean getValue() { + return value; + } + + @Override + public String toSimpleSql() { + if (value) { + return "true"; + } else { + return "false"; + } + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_BOOL; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralDate.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralDate.java new file mode 100644 index 000000000..0bc0b4bd9 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralDate.java @@ -0,0 +1,31 @@ +package com.exasol.adapter.sql; + + +public class SqlLiteralDate extends SqlNode { + + private String value; // Stored as YYYY-MM-DD + + public SqlLiteralDate(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + @Override + public String toSimpleSql() { + return "DATE '" + value + "'"; // This gets always executed as TO_DATE('2015-02-01','YYYY-MM-DD') + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_DATE; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralDouble.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralDouble.java new file mode 100644 index 000000000..85e596aa7 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralDouble.java @@ -0,0 +1,31 @@ +package com.exasol.adapter.sql; + + +public class SqlLiteralDouble extends SqlNode { + + private double value; + + 
public SqlLiteralDouble(double value) { + this.value = value; + } + + public double getValue() { + return value; + } + + @Override + public String toSimpleSql() { + return Double.toString(value); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_DOUBLE; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralExactnumeric.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralExactnumeric.java new file mode 100644 index 000000000..1ace92c3f --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralExactnumeric.java @@ -0,0 +1,32 @@ +package com.exasol.adapter.sql; + +import java.math.BigDecimal; + +public class SqlLiteralExactnumeric extends SqlNode { + + private BigDecimal value; + + public SqlLiteralExactnumeric(BigDecimal value) { + this.value = value; + } + + public BigDecimal getValue() { + return value; + } + + @Override + public String toSimpleSql() { + return value.toString(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_EXACTNUMERIC; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralInterval.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralInterval.java new file mode 100644 index 000000000..2339d39be --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralInterval.java @@ -0,0 +1,44 @@ +package com.exasol.adapter.sql; + + +import com.exasol.adapter.metadata.DataType; + +public class SqlLiteralInterval extends SqlNode { + + private String value; // stored in interval literal format, e.g. '3-11' (YEAR TO MONTH) or '2 23:10:59.123' (DAY TO SECOND) + private DataType type; + + public SqlLiteralInterval(String value, DataType type) { + this.value = value; + this.type = type; + } + + public String getValue() { + return value; + } + + public DataType getDataType() { + return type; + } + + @Override + public String toSimpleSql() { + if (type.getIntervalType() == DataType.IntervalType.YEAR_TO_MONTH) { + return "INTERVAL '" + value + "' YEAR (" + type.getPrecision() + ") TO MONTH"; + } else { + return "INTERVAL '" + value + "' DAY (" + type.getPrecision() + + ") TO SECOND (" + type.getIntervalFraction() + ")"; + } + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_INTERVAL; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralNull.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralNull.java new file mode 100644 index 000000000..eb37d49cc --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralNull.java @@ -0,0 +1,21 @@ +package com.exasol.adapter.sql; + + +public class SqlLiteralNull extends SqlNode { + + @Override + public String toSimpleSql() { + return "NULL"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_NULL; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralString.java
b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralString.java new file mode 100644 index 000000000..2d64b9195 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralString.java @@ -0,0 +1,32 @@ +package com.exasol.adapter.sql; + + +public class SqlLiteralString extends SqlNode { + + private String value; + + public SqlLiteralString(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + @Override + public String toSimpleSql() { + // Don't forget to escape single quote + return "'" + value.replace("'", "''") + "'"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_STRING; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralTimestamp.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralTimestamp.java new file mode 100644 index 000000000..d45d910de --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralTimestamp.java @@ -0,0 +1,31 @@ +package com.exasol.adapter.sql; + + +public class SqlLiteralTimestamp extends SqlNode { + + private String value; // stored as YYYY-MM-DD HH:MI:SS.FF6 + + public SqlLiteralTimestamp(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + @Override + public String toSimpleSql() { + return "TIMESTAMP '" + value.toString() + "'"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_TIMESTAMP; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralTimestampUtc.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralTimestampUtc.java new file mode 100644 index 000000000..cc9ece54b --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlLiteralTimestampUtc.java @@ -0,0 +1,31 @@ +package com.exasol.adapter.sql; + + +public class SqlLiteralTimestampUtc extends SqlNode { + + private String value; // stored as YYYY-MM-DD HH:MI:SS.FF6 + + public SqlLiteralTimestampUtc(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + @Override + public String toSimpleSql() { + return "TIMESTAMP '" + value.toString() + "'"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.LITERAL_TIMESTAMPUTC; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNode.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNode.java new file mode 100644 index 000000000..489b1ea8f --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNode.java @@ -0,0 +1,66 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.List; + +/** + * Node in a graph representing a SQL query. 
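Before the class definition continues below, a short sketch of how such a graph is assembled in practice: literals form the leaves, and composite nodes receive their children as "sons" and wire up the parent pointers. Hypothetical usage, not part of this commit, assuming the node classes from this diff:

```java
package com.exasol.adapter.sql;

import java.math.BigDecimal;

// Hypothetical usage sketch, not part of this commit.
public class SqlNodeDemo {
    public static void main(String[] args) {
        SqlNode lower = new SqlLiteralExactnumeric(BigDecimal.ONE);
        SqlNode upper = new SqlLiteralExactnumeric(BigDecimal.TEN);
        SqlNode value = new SqlLiteralString("x");
        // The predicate becomes the parent of its three child nodes.
        SqlPredicateBetween between = new SqlPredicateBetween(value, lower, upper);
        System.out.println(between.toSimpleSql()); // 'x' BETWEEN 1 AND 10
        System.out.println(lower.hasParent());     // true
    }
}
```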
+ */ +public abstract class SqlNode { + + private List sons; + private SqlNode parent; + + public SqlNode() { + this.sons = new ArrayList<>(); + } + + public SqlNode(List sons) { + this.sons = sons; + // set parents + for (SqlNode son : sons) { + son.setParent(this); + } + } + + public abstract SqlNodeType getType(); + + public SqlNode getSon(int i) { + return sons.get(i); + } + + public List getSons() { + return sons; + } + + public void setSons(List sons) { + this.sons = sons; + // set parents + for (SqlNode son : sons) { + son.setParent(this); + } + } + + public void setParent(SqlNode parent) { + this.parent = parent; + } + + public SqlNode getParent() { + return parent; + } + + public boolean hasParent() { + return (this.parent != null); + } + + /** + * See {@link SqlNodeVisitor} + * @param visitor The visitor object on which the appropriate visit(sqlNode) method is called + */ + public abstract R accept(SqlNodeVisitor visitor); + + /** + * @return A SQL representation of the current graph, using EXASOL SQL syntax. It is called "SIMPLE" because it is not guaranteed to be 100 % correct SQL (e.g. might be ambiguous). + */ + abstract String toSimpleSql(); +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNodeType.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNodeType.java new file mode 100644 index 000000000..2801d339b --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNodeType.java @@ -0,0 +1,55 @@ +package com.exasol.adapter.sql; + +/** + * All types of nodes that can be part of a pushdown request. + * Each type represents a different class inheriting from SqlNode. + */ +public enum SqlNodeType { + + SELECT, + + TABLE, + + SELECT_LIST, + GROUP_BY, + + COLUMN, + + LITERAL_NULL, + LITERAL_BOOL, + LITERAL_DATE, + LITERAL_TIMESTAMP, + LITERAL_TIMESTAMPUTC, + LITERAL_DOUBLE, + LITERAL_EXACTNUMERIC, + LITERAL_STRING, + LITERAL_INTERVAL, + + PREDICATE_AND, + PREDICATE_OR, + PREDICATE_NOT, + + PREDICATE_EQUAL, + PREDICATE_NOTEQUAL, + PREDICATE_LESS, + PREDICATE_LESSEQUAL, + + PREDICATE_LIKE, + PREDICATE_LIKE_REGEXP, + PREDICATE_BETWEEN, + PREDICATE_IN_CONSTLIST, + + PREDICATE_IS_NULL, + PREDICATE_IS_NOT_NULL, + + FUNCTION_SCALAR, + FUNCTION_SCALAR_CASE, + FUNCTION_SCALAR_CAST, + FUNCTION_SCALAR_EXTRACT, + FUNCTION_AGGREGATE, + FUNCTION_AGGREGATE_GROUP_CONCAT, + + ORDER_BY, + + LIMIT; +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNodeVisitor.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNodeVisitor.java new file mode 100644 index 000000000..b3397fd03 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlNodeVisitor.java @@ -0,0 +1,85 @@ +package com.exasol.adapter.sql; + +/** + * Implementation of the Visitor pattern for the SqlNode. + * + * Benefit of this Visitor implementation: We get compile time safety that all + * Visitors have implementations for all SqlNode types. + * + * Drawback of this Visitor implementation: Whenever a new SqlNode gets added, + * we need to implement it here (should be fine for now). If this becomes too + * annoying, we can still switch to a visitor pattern using Reflection.
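The compile-time-safety argument above is easiest to see in a stripped-down, self-contained version of the same double dispatch, shown here before the interface definition continues below. These are toy types, not the adapter's actual classes: adding a third node type to the toy `Visitor` interface immediately breaks every implementor at compile time.

```java
// Self-contained illustration of the double dispatch used by SqlNode/SqlNodeVisitor.
interface Visitor<R> {
    R visit(Num num);  // one overload per concrete node type; adding a node
    R visit(Add add);  // type here is a compile error for every implementor
}

abstract class Node {
    abstract <R> R accept(Visitor<R> visitor);
}

class Num extends Node {
    final int value;
    Num(int value) { this.value = value; }
    @Override <R> R accept(Visitor<R> v) { return v.visit(this); } // dispatch on Num
}

class Add extends Node {
    final Node left, right;
    Add(Node left, Node right) { this.left = left; this.right = right; }
    @Override <R> R accept(Visitor<R> v) { return v.visit(this); } // dispatch on Add
}

class Renderer implements Visitor<String> {
    @Override public String visit(Num num) { return Integer.toString(num.value); }
    @Override public String visit(Add add) {
        return "(" + add.left.accept(this) + " + " + add.right.accept(this) + ")";
    }
}

public class VisitorDemo {
    public static void main(String[] args) {
        Node expr = new Add(new Num(1), new Num(2));
        System.out.println(expr.accept(new Renderer())); // prints: (1 + 2)
    }
}
```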
+ */ +public interface SqlNodeVisitor { + + public R visit(SqlStatementSelect select); + + public R visit(SqlSelectList selectList); + + public R visit(SqlGroupBy groupBy); + + public R visit(SqlColumn sqlColumn); + + public R visit(SqlFunctionAggregate sqlFunctionAggregate); + + public R visit(SqlFunctionAggregateGroupConcat sqlFunctionAggregateGroupConcat); + + public R visit(SqlFunctionScalar sqlFunctionScalar); + + public R visit(SqlFunctionScalarCase sqlFunctionScalarCase); + + public R visit(SqlFunctionScalarCast sqlFunctionScalarCast); + + public R visit(SqlFunctionScalarExtract sqlFunctionScalarExtract); + + public R visit(SqlLimit sqlLimit); + + public R visit(SqlLiteralBool sqlLiteralBool); + + public R visit(SqlLiteralDate sqlLiteralDate); + + public R visit(SqlLiteralDouble sqlLiteralDouble); + + public R visit(SqlLiteralExactnumeric sqlLiteralExactnumeric); + + public R visit(SqlLiteralNull sqlLiteralNull); + + public R visit(SqlLiteralString sqlLiteralString); + + public R visit(SqlLiteralTimestamp sqlLiteralTimestamp); + + public R visit(SqlLiteralTimestampUtc sqlLiteralTimestampUtc); + + public R visit(SqlLiteralInterval sqlLiteralInterval); + + public R visit(SqlOrderBy sqlOrderBy); + + public R visit(SqlPredicateAnd sqlPredicateAnd); + + public R visit(SqlPredicateBetween sqlPredicateBetween); + + public R visit(SqlPredicateEqual sqlPredicateEqual); + + public R visit(SqlPredicateInConstList sqlPredicateInConstList); + + public R visit(SqlPredicateLess sqlPredicateLess); + + public R visit(SqlPredicateLessEqual sqlPredicateLessEqual); + + public R visit(SqlPredicateLike sqlPredicateLike); + + public R visit(SqlPredicateLikeRegexp sqlPredicateLikeRegexp); + + public R visit(SqlPredicateNot sqlPredicateNot); + + public R visit(SqlPredicateNotEqual sqlPredicateNotEqual); + + public R visit(SqlPredicateOr sqlPredicateOr); + + public R visit(SqlPredicateIsNotNull sqlPredicateIsNotNull); + + public R visit(SqlPredicateIsNull sqlPredicateIsNull); + + public R visit(SqlTable sqlTable); + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlOrderBy.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlOrderBy.java new file mode 100644 index 000000000..b6f4148e0 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlOrderBy.java @@ -0,0 +1,64 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.List; + +import com.google.common.base.Joiner; + +public class SqlOrderBy extends SqlNode { + + List expressions; + List isAsc; + + // True, if the desired position of nulls is at the end, false if at beginning. + // This does not necessarily mean the user explicitly specified NULLS LAST or NULLS FIRST.
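Before the field declarations resume below, a sketch of how these three parallel lists translate into rendered SQL (hypothetical usage, not part of this commit; it relies on the `toSimpleSql()` implementation further down and the literal classes from this diff):

```java
package com.exasol.adapter.sql;

import com.google.common.collect.ImmutableList;
import java.math.BigDecimal;

// Hypothetical usage sketch, not part of this commit.
public class OrderByDemo {
    public static void main(String[] args) {
        // One ORDER BY element: descending, with NULLs ordered first.
        SqlOrderBy orderBy = new SqlOrderBy(
                ImmutableList.of(new SqlLiteralExactnumeric(BigDecimal.ONE)),
                ImmutableList.of(false),  // not ascending -> DESC
                ImmutableList.of(false)); // nulls not last -> NULLS FIRST
        System.out.println(orderBy.toSimpleSql()); // ORDER BY 1 DESC NULLS FIRST
    }
}
```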
+ List nullsLast; + + public SqlOrderBy(List expressions, List isAsc, List nullsLast) { + super(expressions); + this.expressions = expressions; + this.isAsc = isAsc; + this.nullsLast = nullsLast; + } + + public List getExpressions() { + return expressions; + } + + public List isAscending() { + return isAsc; + } + + public List nullsLast() { + return nullsLast; + } + + @Override + public String toSimpleSql() { + // ORDER BY <expr> [ASC/DESC] [NULLS FIRST/LAST] + // ASC and NULLS LAST are default + List sqlOrderElement = new ArrayList<>(); + for (int i = 0; i < expressions.size(); i++) { + String elementSql = expressions.get(i).toSimpleSql(); + if (!isAsc.get(i)) { + elementSql += " DESC"; + } + if (!nullsLast.get(i)) { + elementSql += " NULLS FIRST"; + } + sqlOrderElement.add(elementSql); + } + return "ORDER BY " + Joiner.on(", ").join(sqlOrderElement); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.ORDER_BY; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicate.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicate.java new file mode 100644 index 000000000..46cb6f659 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicate.java @@ -0,0 +1,21 @@ +package com.exasol.adapter.sql; + +import java.util.List; + +public abstract class SqlPredicate extends SqlNode { + + private Predicate function; + + public SqlPredicate(Predicate function) { + this.function = function; + } + + public SqlPredicate(List sons, Predicate function) { + super(sons); + this.function = function; + } + + public Predicate getFunction() { + return function; + } +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateAnd.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateAnd.java new file mode 100644 index 000000000..2f33ecde9 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateAnd.java @@ -0,0 +1,34 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.List; + +import com.google.common.base.Joiner; + + +public class SqlPredicateAnd extends SqlPredicate { + + public SqlPredicateAnd(List andedPredicates) { + super(andedPredicates, Predicate.AND); + } + + @Override + public String toSimpleSql() { + List operandsSql = new ArrayList<>(); + for (SqlNode node : getSons()) { + operandsSql.add(node.toSimpleSql()); + } + return "(" + Joiner.on(" AND ").join(operandsSql) + ")"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_AND; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateBetween.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateBetween.java new file mode 100644 index 000000000..12cb7c77f --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateBetween.java @@ -0,0 +1,49 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists; + +import java.util.List; + +public class SqlPredicateBetween extends SqlPredicate { + + private SqlNode expression; + private SqlNode betweenLeft; + private SqlNode betweenRight; + + public SqlPredicateBetween(SqlNode expression, SqlNode betweenLeft, SqlNode betweenRight) { + super(ImmutableList.of(expression, betweenLeft, betweenRight), Predicate.BETWEEN); + this.expression = expression; + this.betweenLeft = betweenLeft; + this.betweenRight = betweenRight; + } + + public SqlNode getExpression() { + return expression; + } + + public SqlNode getBetweenLeft() { + return
betweenLeft; + } + + public SqlNode getBetweenRight() { + return betweenRight; + } + + @Override + public String toSimpleSql() { + return expression.toSimpleSql() + " BETWEEN " + betweenLeft.toSimpleSql() + " AND " + betweenRight.toSimpleSql(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_BETWEEN; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateEqual.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateEqual.java new file mode 100644 index 000000000..ef68e1bb8 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateEqual.java @@ -0,0 +1,40 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +public class SqlPredicateEqual extends SqlPredicate { + + private SqlNode left; + private SqlNode right; + + public SqlPredicateEqual(SqlNode left, SqlNode right) { + super(ImmutableList.of(left, right), Predicate.EQUAL); + this.left = left; + this.right = right; + } + + public SqlNode getLeft() { + return left; + } + + public SqlNode getRight() { + return right; + } + + @Override + public String toSimpleSql() { + return left.toSimpleSql() + " = " + right.toSimpleSql(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_EQUAL; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateInConstList.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateInConstList.java new file mode 100644 index 000000000..2a1aa6e73 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateInConstList.java @@ -0,0 +1,50 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.List; + +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; + + +public class SqlPredicateInConstList extends SqlPredicate { + + // For IN (...) 
this stores the left-hand-side expression + SqlNode expression; + // Arguments inside the brackets + List inArguments; + + public SqlPredicateInConstList(SqlNode expression, List inArguments) { + super(ImmutableList.builder().add(expression).addAll(inArguments).build(), Predicate.IN_CONSTLIST); + this.expression = expression; + this.inArguments = inArguments; + } + + public SqlNode getExpression() { + return expression; + } + + public List getInArguments() { + return inArguments; + } + + @Override + public String toSimpleSql() { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : inArguments) { + argumentsSql.add(node.toSimpleSql()); + } + return expression.toSimpleSql() + " IN (" + Joiner.on(", ").join(argumentsSql) + ")"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_IN_CONSTLIST; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateIsNotNull.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateIsNotNull.java new file mode 100644 index 000000000..6ad0ed531 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateIsNotNull.java @@ -0,0 +1,34 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +public class SqlPredicateIsNotNull extends SqlPredicate { + + private SqlNode expression; + + public SqlPredicateIsNotNull(SqlNode expression) { + super(ImmutableList.of(expression), Predicate.IS_NOT_NULL); + this.expression = expression; + } + + public SqlNode getExpression() { + return expression; + } + + @Override + public String toSimpleSql() { + return expression.toSimpleSql() + " IS NOT NULL"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_IS_NOT_NULL; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateIsNull.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateIsNull.java new file mode 100644 index 000000000..b48b2a160 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateIsNull.java @@ -0,0 +1,34 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +public class SqlPredicateIsNull extends SqlPredicate { + + private SqlNode expression; + + public SqlPredicateIsNull(SqlNode expression) { + super(ImmutableList.of(expression), Predicate.IS_NULL); + this.expression = expression; + } + + public SqlNode getExpression() { + return expression; + } + + @Override + public String toSimpleSql() { + return expression.toSimpleSql() + " IS NULL"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_IS_NULL; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLess.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLess.java new file mode 100644 index 000000000..6501e4c02 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLess.java @@ -0,0 +1,40 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +public class SqlPredicateLess extends
SqlPredicate { + + private SqlNode left; + private SqlNode right; + + public SqlPredicateLess(SqlNode left, SqlNode right) { + super(ImmutableList.of(left, right), Predicate.LESS); + this.left = left; + this.right = right; + } + + public SqlNode getLeft() { + return left; + } + + public SqlNode getRight() { + return right; + } + + @Override + public String toSimpleSql() { + return left.toSimpleSql() + " < " + right.toSimpleSql(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_LESS; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLessEqual.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLessEqual.java new file mode 100644 index 000000000..104d6ed1f --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLessEqual.java @@ -0,0 +1,40 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +public class SqlPredicateLessEqual extends SqlPredicate { + + private SqlNode left; + private SqlNode right; + + public SqlPredicateLessEqual(SqlNode left, SqlNode right) { + super(ImmutableList.of(left, right), Predicate.LESSEQUAL); + this.left = left; + this.right = right; + } + + public SqlNode getLeft() { + return left; + } + + public SqlNode getRight() { + return right; + } + + @Override + public String toSimpleSql() { + return left.toSimpleSql() + " <= " + right.toSimpleSql(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_LESSEQUAL; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLike.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLike.java new file mode 100644 index 000000000..b2a7b9901 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLike.java @@ -0,0 +1,57 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +/** + * + */ +public class SqlPredicateLike extends SqlPredicate { + + private SqlNode left; + private SqlNode pattern; + private SqlNode escapeChar; + + public SqlPredicateLike(SqlNode left, SqlNode pattern) { + this(left, pattern, null); + } + + public SqlPredicateLike(SqlNode left, SqlNode pattern, SqlNode escapeChar) { + super(ImmutableList.of(left, pattern), Predicate.LIKE); + this.left = left; + this.pattern = pattern; + this.escapeChar = escapeChar; + } + + public SqlNode getLeft() { + return left; + } + + public SqlNode getPattern() { + return pattern; + } + + public SqlNode getEscapeChar() { + return escapeChar; + } + + @Override + public String toSimpleSql() { + String sql = left.toSimpleSql() + " LIKE " + pattern.toSimpleSql(); + if (escapeChar != null) { + sql += " ESCAPE " + escapeChar.toSimpleSql(); + } + return sql; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_LIKE; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLikeRegexp.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLikeRegexp.java new file mode 100644 index 000000000..65e45765a --- /dev/null +++
b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateLikeRegexp.java @@ -0,0 +1,40 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +public class SqlPredicateLikeRegexp extends SqlPredicate { + + private SqlNode left; + private SqlNode pattern; + + public SqlPredicateLikeRegexp(SqlNode left, SqlNode pattern) { + super(ImmutableList.of(left, pattern), Predicate.REGEXP_LIKE); + this.left = left; + this.pattern = pattern; + } + + public SqlNode getLeft() { + return left; + } + + public SqlNode getPattern() { + return pattern; + } + + @Override + public String toSimpleSql() { + return left.toSimpleSql() + " REGEXP_LIKE " + pattern.toSimpleSql(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_LIKE_REGEXP; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateNot.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateNot.java new file mode 100644 index 000000000..7feae89aa --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateNot.java @@ -0,0 +1,34 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +public class SqlPredicateNot extends SqlPredicate { + + private SqlNode expression; + + public SqlPredicateNot(SqlNode expression) { + super(ImmutableList.of(expression), Predicate.NOT); + this.expression = expression; + } + + public SqlNode getExpression() { + return expression; + } + + @Override + public String toSimpleSql() { + return "NOT (" + expression.toSimpleSql() + ")"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_NOT; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateNotEqual.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateNotEqual.java new file mode 100644 index 000000000..8ce5abf95 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateNotEqual.java @@ -0,0 +1,40 @@ +package com.exasol.adapter.sql; + + +import com.google.common.collect.ImmutableList; + +public class SqlPredicateNotEqual extends SqlPredicate { + + private SqlNode left; + private SqlNode right; + + public SqlPredicateNotEqual(SqlNode left, SqlNode right) { + super(ImmutableList.of(left, right), Predicate.NOTEQUAL); + this.left = left; + this.right = right; + } + + public SqlNode getLeft() { + return left; + } + + public SqlNode getRight() { + return right; + } + + @Override + public String toSimpleSql() { + return left.toSimpleSql() + " != " + right.toSimpleSql(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_NOTEQUAL; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateOr.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateOr.java new file mode 100644 index 000000000..c033ea0f3 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlPredicateOr.java @@ -0,0 +1,34 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; 
+import java.util.List; + +import com.google.common.base.Joiner; + + +public class SqlPredicateOr extends SqlPredicate { + + public SqlPredicateOr(List orPredicates) { + super(orPredicates, Predicate.OR); + } + + @Override + public String toSimpleSql() { + List operandsSql = new ArrayList<>(); + for (SqlNode node : getSons()) { + operandsSql.add(node.toSimpleSql()); + } + return "(" + Joiner.on(" OR ").join(operandsSql) + ")"; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.PREDICATE_OR; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlSelectList.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlSelectList.java new file mode 100644 index 000000000..1f3ee473c --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlSelectList.java @@ -0,0 +1,71 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.List; + +import com.google.common.base.Joiner; + +public class SqlSelectList extends SqlExpressionList { + + /** + * If true, we just need one arbitrary value for each row. Example: If user + * runs COUNT (*) and COUNT cannot be pushed down, we need to return any + * value for each row (e.g. constant TRUE) and then EXASOL can do the COUNT. + */ + boolean requestedAnyColumn = false; + + /** + * Call this if all columns are required, i.e. SELECT * FROM ... + */ + public SqlSelectList() { } + + /** + * This is required in two cases
+ * 1. If selectList has > 1 elements: This is a regular select list 2. If + * selectList has no element: This means that any column is required + */ + public SqlSelectList(List selectList) { + super(selectList); + if (selectList.size() == 0) { + requestedAnyColumn = true; + } + } + + public boolean isRequestAnyColumn() { + return requestedAnyColumn; + } + + /** + * @return true if this is "SELECT *", false otherwise + */ + public boolean isSelectStar() { + return getSons().isEmpty(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.SELECT_LIST; + } + + @Override + public String toSimpleSql() { + if (requestedAnyColumn) { + // The system requested any column + return "true"; + } + if (getSons().isEmpty()) { + return "*"; + } + List selectElement = new ArrayList<>(); + for (SqlNode node : getSons()) { + selectElement.add(node.toSimpleSql()); + } + return Joiner.on(", ").join(selectElement); + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlStatement.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlStatement.java new file mode 100644 index 000000000..3a619c21a --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlStatement.java @@ -0,0 +1,15 @@ +package com.exasol.adapter.sql; + +import java.util.List; + +public abstract class SqlStatement extends SqlNode { + + public SqlStatement(List sons) { + super(sons); + } + + public SqlStatement() { + + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlStatementSelect.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlStatementSelect.java new file mode 100644 index 000000000..7a84a675c --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlStatementSelect.java @@ -0,0 +1,130 @@ +package com.exasol.adapter.sql; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * We could consider to apply builder pattern here (if time) + */ +public class SqlStatementSelect extends SqlStatement { + + private SqlTable fromClause; // can be changed to SqlNode later if we support more complex things + private SqlSelectList selectList; + private SqlNode whereClause; + private SqlExpressionList groupBy; + private SqlNode having; + private SqlOrderBy orderBy; + private SqlLimit limit; + + public SqlStatementSelect(SqlTable fromClause, SqlSelectList selectList, SqlNode whereClause, SqlExpressionList groupBy, SqlNode having, SqlOrderBy orderBy, SqlLimit limit) { + List sons = new ArrayList<>(); + sons.add(fromClause); + sons.add(selectList); + sons.add(whereClause); + sons.add(groupBy); + sons.add(having); + sons.add(orderBy); + sons.add(limit); + sons.removeAll(Collections.singleton(null)); + this.setSons(sons); + this.fromClause = fromClause; + this.selectList = selectList; + this.whereClause = whereClause; + this.groupBy = groupBy; + this.having = having; + this.orderBy = orderBy; + this.limit = limit; + assert(this.fromClause != null); + assert(this.selectList != null); + } + + public boolean hasProjection() { + return selectList != null; + } + + public boolean hasGroupBy() { + return groupBy != null; + } + + public boolean hasHaving() { + return having != null; + } + + public boolean hasFilter() { + return whereClause != null; + } + + public boolean hasOrderBy() { + return orderBy != null; 
+ } + + public boolean hasLimit() { + return limit != null; + } + + public SqlTable getFromClause() { + return fromClause; + } + + public SqlSelectList getSelectList() { + return selectList; + } + + public SqlNode getWhereClause() { + return whereClause; + } + + public SqlExpressionList getGroupBy() { + return groupBy; + } + + public SqlNode getHaving() { + return having; + } + + public SqlOrderBy getOrderBy() { + return orderBy; + } + + public SqlLimit getLimit() { + return limit; + } + + @Override + public String toSimpleSql() { + + StringBuilder sql = new StringBuilder(); + sql.append("SELECT "); + sql.append(selectList.toSimpleSql()); + sql.append(" FROM "); + sql.append(fromClause.toSimpleSql()); + if (hasFilter()) { + sql.append(" WHERE " + whereClause.toSimpleSql()); + } + if (hasGroupBy()) { + sql.append(" GROUP BY " + groupBy.toSimpleSql()); + } + if (hasHaving()) { + sql.append(" HAVING " + having.toSimpleSql()); + } + if (hasOrderBy()) { + sql.append(" " + orderBy.toSimpleSql()); + } + if (hasLimit()) { + sql.append(" " + limit.toSimpleSql()); + } + return sql.toString(); + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.SELECT; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlTable.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlTable.java new file mode 100644 index 000000000..f1df95877 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlTable.java @@ -0,0 +1,54 @@ +package com.exasol.adapter.sql; + +import com.exasol.adapter.metadata.TableMetadata; + +public class SqlTable extends SqlNode { + + private String name; + private String alias; // what is the exact semantic of this? Currently simply to generate a query with the expected alias. 
+ private TableMetadata metadata; + + public SqlTable(String name, TableMetadata metadata) { + this.name = name; + this.alias = name; + this.metadata = metadata; + } + + public SqlTable(String name, String alias, TableMetadata metadata) { + this.name = name; + this.alias = alias; + this.metadata = metadata; + } + + public boolean hasAlias() { + return !name.equals(alias); + } + + public String getName() { + return name; + } + + public String getAlias() { + return alias; + } + + public TableMetadata getMetadata() { + return metadata; + } + + @Override + public String toSimpleSql() { + return "\"" + name.replace("\"", "\"\"") + "\""; + } + + @Override + public SqlNodeType getType() { + return SqlNodeType.TABLE; + } + + @Override + public R accept(SqlNodeVisitor visitor) { + return visitor.visit(this); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlUtils.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlUtils.java new file mode 100644 index 000000000..da987e5d4 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/adapter/sql/SqlUtils.java @@ -0,0 +1,20 @@ +package com.exasol.adapter.sql; + +import java.util.Map; + +public class SqlUtils { + + public static String quoteIdentifierIfNeeded(String identifier, Map config) { + String quoteChar = "\""; + if (config.containsKey("QUOTE_CHAR")) { + quoteChar = config.get("QUOTE_CHAR").toString(); + } + if (identifier.toUpperCase().equals(identifier)) { + // Only upper case, no need to quote + return identifier; + } else { + return quoteChar + identifier + quoteChar; + } + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/utils/JsonHelper.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/utils/JsonHelper.java new file mode 100644 index 000000000..577b05a55 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/utils/JsonHelper.java @@ -0,0 +1,52 @@ +package com.exasol.utils; + +import java.io.PrintWriter; +import java.io.StringReader; +import java.io.StringWriter; +import java.util.HashMap; +import java.util.Map; + +import javax.json.Json; +import javax.json.JsonBuilderFactory; +import javax.json.JsonObject; +import javax.json.JsonReader; +import javax.json.JsonWriter; +import javax.json.stream.JsonGenerator; + +/** + * http://docs.oracle.com/javaee/7/api/javax/json/JsonObjectBuilder.html + * http://docs.oracle.com/javaee/7/api/javax/json/stream/JsonGenerator.html + */ +public class JsonHelper { + + public static JsonBuilderFactory getBuilderFactory() { + Map config = new HashMap(); + return Json.createBuilderFactory(config); + } + + public static JsonObject getJsonObject(String data) throws Exception { + JsonReader jr = Json.createReader(new StringReader(data)); + JsonObject obj = jr.readObject(); + jr.close(); + return obj; + } + + public static String getKeyAsString(JsonObject obj, String key, String defaultValue) { + String value = defaultValue; + if (obj.containsKey(key)) { + value = obj.get(key).toString(); + } + return value; + } + + public static String prettyJson(JsonObject obj) { + Map config = new HashMap(); + config.put(JsonGenerator.PRETTY_PRINTING, true); + StringWriter strWriter = new StringWriter(); + PrintWriter pw = new PrintWriter(strWriter); + JsonWriter jsonWriter = Json.createWriterFactory(config).createWriter(pw); + jsonWriter.writeObject(obj); + return strWriter.toString(); + } + +} diff --git 
a/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/utils/SqlTestUtil.java b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/utils/SqlTestUtil.java new file mode 100644 index 000000000..0b7c9cf82 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/main/java/com/exasol/utils/SqlTestUtil.java @@ -0,0 +1,14 @@ +package com.exasol.utils; + +public class SqlTestUtil { + + /** + * Convert newlines, tabs, and double whitespaces to whitespaces. At the end only single whitespaces remain. + */ + public static String normalizeSql(String sql) { + return sql.replaceAll("\t", " ") + .replaceAll("\n", " ") + .replaceAll("\\s+", " "); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/AggregateFunctionCapabilityTest.java b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/AggregateFunctionCapabilityTest.java new file mode 100644 index 000000000..ca4222dad --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/AggregateFunctionCapabilityTest.java @@ -0,0 +1,31 @@ +package com.exasol.adapter.capabilities; + +import com.exasol.adapter.sql.AggregateFunction; +import org.junit.Test; + +import static org.junit.Assert.*; + +public class AggregateFunctionCapabilityTest { + + @Test + public void testCompleteness() { + // Do we have functions where we don't have capabilities for? + for (AggregateFunction func : AggregateFunction.values()) { + boolean foundCap = false; + for (AggregateFunctionCapability cap : AggregateFunctionCapability.values()) { + if (cap.getFunction() == func) { + foundCap = true; + } + } + assertTrue("Did not find a capability for function " + func.name(), foundCap); + } + } + + @Test + public void testConsistentNaming () { + for (AggregateFunctionCapability cap : AggregateFunctionCapability.values()) { + assertTrue(cap.name().startsWith(cap.getFunction().name())); + } + } + +} \ No newline at end of file diff --git a/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/PredicateCapabilityTest.java b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/PredicateCapabilityTest.java new file mode 100644 index 000000000..4b9f75640 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/PredicateCapabilityTest.java @@ -0,0 +1,33 @@ +package com.exasol.adapter.capabilities; + +import com.exasol.adapter.sql.AggregateFunction; +import com.exasol.adapter.sql.Predicate; +import org.junit.Test; + +import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; + +public class PredicateCapabilityTest { + + @Test + public void testCompleteness() { + // Do we have predicates where we don't have capabilities for? 
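The completeness checks in these capability tests all share the nested-loop shape that follows this aside. For readers on Java 8+, the same check can be written more compactly with streams; this is a sketch for comparison only, the commit itself uses the loop style:

```java
package com.exasol.adapter.capabilities;

import java.util.Arrays;

import com.exasol.adapter.sql.Predicate;

import static org.junit.Assert.assertTrue;

// Sketch: the testCompleteness() loop below, expressed with streams.
public class CompletenessCheckSketch {
    static void assertEveryPredicateHasACapability() {
        for (Predicate pred : Predicate.values()) {
            boolean covered = Arrays.stream(PredicateCapability.values())
                    .anyMatch(cap -> cap.getPredicate() == pred);
            assertTrue("Did not find a capability for predicate " + pred.name(), covered);
        }
    }
}
```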
+ for (Predicate pred : Predicate.values()) { + boolean foundCap = false; + for (PredicateCapability cap : PredicateCapability.values()) { + if (cap.getPredicate() == pred) { + foundCap = true; + } + } + assertTrue("Did not find a capability for predicate " + pred.name(), foundCap); + } + } + + @Test + public void testConsistentNaming () { + for (PredicateCapability cap : PredicateCapability.values()) { + assertTrue(cap.name().startsWith(cap.getPredicate().name())); + } + } + +} \ No newline at end of file diff --git a/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/ScalarFunctionCapabilityTest.java b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/ScalarFunctionCapabilityTest.java new file mode 100644 index 000000000..92bbdc855 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/capabilities/ScalarFunctionCapabilityTest.java @@ -0,0 +1,34 @@ +package com.exasol.adapter.capabilities; + +import com.exasol.adapter.sql.AggregateFunction; +import com.exasol.adapter.sql.Predicate; +import com.exasol.adapter.sql.ScalarFunction; +import org.junit.Test; + +import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; + +public class ScalarFunctionCapabilityTest { + + @Test + public void testCompleteness() { + // Do we have functions where we don't have capabilities for? + for (ScalarFunction function : ScalarFunction.values()) { + boolean foundCap = false; + for (ScalarFunctionCapability cap : ScalarFunctionCapability.values()) { + if (cap.getFunction() == function) { + foundCap = true; + } + } + assertTrue("Did not find a capability for function " + function.name(), foundCap); + } + } + + @Test + public void testConsistentNaming () { + for (ScalarFunctionCapability cap : ScalarFunctionCapability.values()) { + assertEquals(cap.name(), cap.getFunction().name()); + } + } + +} \ No newline at end of file diff --git a/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/json/RequestJsonParserTest.java b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/json/RequestJsonParserTest.java new file mode 100644 index 000000000..01cf47b2b --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/json/RequestJsonParserTest.java @@ -0,0 +1,125 @@ +package com.exasol.adapter.json; + +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.junit.Test; +import org.mockito.internal.matchers.apachecommons.ReflectionEquals; + +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.SchemaMetadataInfo; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.metadata.TableMetadata; +import com.exasol.adapter.metadata.DataType.ExaCharset; +import com.exasol.adapter.request.AdapterRequest; +import com.exasol.adapter.request.PushdownRequest; +import com.exasol.adapter.request.SetPropertiesRequest; +import com.google.common.base.Charsets; +import com.google.common.io.Files; + +public class RequestJsonParserTest { + + @Test + public void testParsePushdownRequest() throws Exception { + // test resources from src/test/resources are copied to target/test-classes, and this folder is the classpath of the junit test. 
+ String file = "target/test-classes/pushdown_request.json"; + String json = Files.toString(new File(file), Charsets.UTF_8); + + Map properties = new HashMap(); + properties.put("HIVE_SERVER", "my-hive-server"); + properties.put("HIVE_DB", "my-hive-db"); + properties.put("HIVE_USER", "my-hive-user"); + SchemaMetadataInfo expectedSchemaMetaInfo = new SchemaMetadataInfo( + "MY_HIVE_VSCHEMA", "{\"lastRefreshed\":\"2015-03-01 12:10:01\",\"key\":\"Any custom schema state here\"}", + properties); + + List expectedInvolvedTablesMetadata = new ArrayList<>(); + String tableName = "CLICKS"; + String tableAdapterNotes = ""; + List tableColumns = new ArrayList<>(); + tableColumns.add(new ColumnMetadata("ID", "", DataType.createDecimal(22, 0), true, false, "", "")); + tableColumns.add(new ColumnMetadata("USER_ID", "", DataType.createDecimal(18, 0), true, false, "", "")); + tableColumns.add(new ColumnMetadata("URL", "", DataType.createVarChar(1000, ExaCharset.UTF8), true, false, "", "")); + tableColumns.add(new ColumnMetadata("REQUEST_TIME", "", DataType.createTimestamp(false), true, false, "", "")); + String tableComment = ""; + expectedInvolvedTablesMetadata.add(new TableMetadata(tableName, tableAdapterNotes, tableColumns, tableComment)); + + RequestJsonParser parser = new RequestJsonParser(); + AdapterRequest request = parser.parseRequest(json); + assertObjectEquals(expectedSchemaMetaInfo, request.getSchemaMetadataInfo()); + assertObjectEquals(expectedInvolvedTablesMetadata, ((PushdownRequest)request).getInvolvedTablesMetadata()); + } + + @Test + public void testParsePushdownRequestAllTypes() throws Exception { + String file = "target/test-classes/pushdown_request_alltypes.json"; + String json = Files.toString(new File(file), Charsets.UTF_8); + + Map properties = new HashMap(); + SchemaMetadataInfo expectedSchemaMetaInfo = new SchemaMetadataInfo("VS", "", properties); + + List expectedInvolvedTablesMetadata = new ArrayList<>(); + String tableName = "T1"; + String tableAdapterNotes = ""; + String tableComment = ""; + List tableColumns = new ArrayList<>(); + tableColumns.add(new ColumnMetadata("C_DECIMAL", "", DataType.createDecimal(18, 2), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_DOUBLE", "", DataType.createDouble(), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_VARCHAR_UTF8_1", "", DataType.createVarChar(10000, ExaCharset.UTF8), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_VARCHAR_UTF8_2", "", DataType.createVarChar(10000, ExaCharset.UTF8), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_VARCHAR_ASCII", "", DataType.createVarChar(10000, ExaCharset.ASCII), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_CHAR_UTF8_1", "", DataType.createChar(3, ExaCharset.UTF8), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_CHAR_UTF8_2", "", DataType.createChar(3, ExaCharset.UTF8), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_CHAR_ASCII", "", DataType.createChar(3, ExaCharset.ASCII), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_DATE", "", DataType.createDate(), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_TIMESTAMP_1", "", DataType.createTimestamp(false), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_TIMESTAMP_2", "", DataType.createTimestamp(false), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_TIMESTAMP_3", "", DataType.createTimestamp(true), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_BOOLEAN", 
"", DataType.createBool(), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_GEOMETRY", "", DataType.createGeometry(1), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_INTERVAL_DS_1", "", DataType.createIntervalDaySecond(2, 3), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_INTERVAL_DS_2", "", DataType.createIntervalDaySecond(3, 4), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_INTERVAL_YM_1", "", DataType.createIntervalYearMonth(2), true, false, "", "")); + tableColumns.add(new ColumnMetadata("C_INTERVAL_YM_2", "", DataType.createIntervalYearMonth(3), true, false, "", "")); + expectedInvolvedTablesMetadata.add(new TableMetadata(tableName, tableAdapterNotes, tableColumns, tableComment)); + + RequestJsonParser parser = new RequestJsonParser(); + AdapterRequest request = parser.parseRequest(json); + assertObjectEquals(expectedSchemaMetaInfo, request.getSchemaMetadataInfo()); + assertObjectEquals(expectedInvolvedTablesMetadata, ((PushdownRequest)request).getInvolvedTablesMetadata()); + } + + @Test + public void testParseSetPropertiesRequest() throws Exception { + String file = "target/test-classes/set_properties_request.json"; + String json = Files.toString(new File(file), Charsets.UTF_8); + + Map expectedOldSchemaProperties = new HashMap(); + expectedOldSchemaProperties.put("EXISTING_PROP_1", "Old Value 1"); + expectedOldSchemaProperties.put("EXISTING_PROP_2", "Old Value 2"); + SchemaMetadataInfo expectedSchemaMetaInfo = new SchemaMetadataInfo("VS", "", expectedOldSchemaProperties); + + Map expectedNewProperties = new HashMap(); + expectedNewProperties.put("EXISTING_PROP_1", "New Value"); + expectedNewProperties.put("EXISTING_PROP_2", null); + expectedNewProperties.put("NEW_PROP", "VAL2"); + expectedNewProperties.put("DELETED_PROP_NON_EXISTING", null); + + RequestJsonParser parser = new RequestJsonParser(); + AdapterRequest request = parser.parseRequest(json); + assertObjectEquals(expectedSchemaMetaInfo, request.getSchemaMetadataInfo()); + assertObjectEquals(expectedNewProperties, ((SetPropertiesRequest)request).getProperties()); + } + + /** + * Without this method we would need to override equals() and .hashcode() for each object, which explodes code and makes it less maintainable + */ + public void assertObjectEquals(final T expected, final T actual) { + assertTrue("Expected:\n" + expected + "\nactual:\n" + actual, new ReflectionEquals(actual, (String[])null).matches(expected)); + } +} diff --git a/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/sql/SqlNodeTest.java b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/sql/SqlNodeTest.java new file mode 100644 index 000000000..340a6e865 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/test/java/com/exasol/adapter/sql/SqlNodeTest.java @@ -0,0 +1,60 @@ +package com.exasol.adapter.sql; + +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.metadata.DataType.ExaCharset; +import com.exasol.adapter.metadata.TableMetadata; +import com.exasol.utils.SqlTestUtil; +import com.google.common.collect.ImmutableList; +import org.junit.Test; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class SqlNodeTest { + + @Test + public void testToSimpleSql() { + SqlNode node = getTestSqlNode(); + String expectedSql = "SELECT \"USER_ID\", COUNT(\"URL\") FROM \"CLICKS\"" + + " WHERE 1 < 
\"USER_ID\"" + + " GROUP BY \"USER_ID\"" + + " HAVING 1 < COUNT(\"URL\")" + + " ORDER BY \"USER_ID\"" + + " LIMIT 10"; + String actualSql = node.toSimpleSql(); + assertEquals(SqlTestUtil.normalizeSql(expectedSql), SqlTestUtil.normalizeSql(actualSql)); + } + + private SqlNode getTestSqlNode() { + // SELECT USER_ID, count(URL) FROM CLICKS + // WHERE 1 < USER_ID + // GROUP BY USER_ID + // HAVING 1 < COUNT(URL) + // ORDER BY USER_ID + // LIMIT 10; + TableMetadata clicksMeta = getClicksTableMetadata(); + SqlTable fromClause = new SqlTable("CLICKS", clicksMeta); + SqlSelectList selectList = new SqlSelectList(ImmutableList.of( + new SqlColumn(0, clicksMeta.getColumns().get(0)), + new SqlFunctionAggregate(AggregateFunction.COUNT, ImmutableList.of(new SqlColumn(1, clicksMeta.getColumns().get(1))), false))); + SqlNode whereClause = new SqlPredicateLess(new SqlLiteralExactnumeric(BigDecimal.ONE), new SqlColumn(0, clicksMeta.getColumns().get(0))); + SqlExpressionList groupBy = new SqlGroupBy(ImmutableList.of(new SqlColumn(0, clicksMeta.getColumns().get(0)))); + SqlNode countUrl = new SqlFunctionAggregate(AggregateFunction.COUNT, ImmutableList.of(new SqlColumn(1, clicksMeta.getColumns().get(1))), false); + SqlNode having = new SqlPredicateLess(new SqlLiteralExactnumeric(BigDecimal.ONE), countUrl); + SqlOrderBy orderBy = new SqlOrderBy(ImmutableList.of(new SqlColumn(0, clicksMeta.getColumns().get(0))), ImmutableList.of(true), ImmutableList.of(true)); + SqlLimit limit = new SqlLimit(10); + return new SqlStatementSelect(fromClause, selectList, whereClause, groupBy, having, orderBy, limit); + } + + private TableMetadata getClicksTableMetadata() { + List columns = new ArrayList<>(); + columns.add(new ColumnMetadata("USER_ID", "", DataType.createDecimal(18, 0), true, false, "", "")); + columns.add(new ColumnMetadata("URL", "", DataType.createVarChar(10000, ExaCharset.UTF8), true, false, "", "")); + return new TableMetadata("CLICKS", "", columns, ""); + } + +} diff --git a/jdbc-adapter/virtualschema-common/src/test/resources/pushdown_request.json b/jdbc-adapter/virtualschema-common/src/test/resources/pushdown_request.json new file mode 100644 index 000000000..19a5b664d --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/test/resources/pushdown_request.json @@ -0,0 +1,154 @@ +{ + "type": "pushdown", + "pushdownRequest": { + "type" : "select", + "aggregationType" : "group_by", + "from" : + { + "type" : "table", + "name" : "CLICKS" + }, + "selectList" : + [ + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + }, + { + "type" : "function_aggregate", + "name" : "count", + "arguments" : + [ + { + "type" : "column", + "name" : "URL", + "columnNr" : 2, + "tableName" : "CLICKS" + } + ] + } + ], + "filter" : + { + "type" : "predicate_less", + "left" : + { + "type" : "literal_exactnumeric", + "value" : "1" + }, + "right" : + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + } + }, + "groupBy" : + [ + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + } + ], + "having" : + { + "type" : "predicate_less", + "left" : + { + "type" : "literal_exactnumeric", + "value" : "1" + }, + "right" : + { + "type" : "function_aggregate", + "name" : "count", + "arguments" : + [ + { + "type" : "column", + "name" : "URL", + "columnNr" : 2, + "tableName" : "CLICKS" + } + ] + } + }, + "orderBy" : + [ + { + "type" : "order_by_element", + "expression" : + { + "columnNr" : 1, + "name" : "USER_ID", + "tableName" : "CLICKS", 
+ "type" : "column" + }, + "isAscending" : true, + "nullsLast" : true + } + ], + "limit" : + { + "numElements" : 10 + } + }, + "involvedTables": [ + { + "name" : "CLICKS", + "columns" : + [ + { + "name" : "ID", + "dataType" : + { + "precision" : 22, + "scale" : 0, + "type" : "DECIMAL" + } + }, + { + "name" : "USER_ID", + "dataType" : + { + "precision" : 18, + "scale" : 0, + "type" : "DECIMAL" + } + }, + { + "name" : "URL", + "dataType" : + { + "size" : 1000, + "type" : "VARCHAR" + } + }, + { + "name" : "REQUEST_TIME", + "dataType" : + { + "type" : "TIMESTAMP" + } + } + ] + } + ], + "schemaMetadataInfo": { + "name": "MY_HIVE_VSCHEMA", + "adapterNotes": { + "lastRefreshed": "2015-03-01 12:10:01", + "key": "Any custom schema state here" + }, + "properties": { + "HIVE_SERVER": "my-hive-server", + "HIVE_DB": "my-hive-db", + "HIVE_USER": "my-hive-user" + } + } +} diff --git a/jdbc-adapter/virtualschema-common/src/test/resources/pushdown_request_alltypes.json b/jdbc-adapter/virtualschema-common/src/test/resources/pushdown_request_alltypes.json new file mode 100644 index 000000000..2c146e9e1 --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/test/resources/pushdown_request_alltypes.json @@ -0,0 +1,154 @@ +{ + "type": "pushdown", + "pushdownRequest": { + "type" : "select", + "from" : + { + "type" : "table", + "name" : "T1" + } + }, + "involvedTables": [ + { + "name": "T1", + "columns": [ + { + "name": "C_DECIMAL", + "dataType": { + "type": "DECIMAL", + "precision": 18, + "scale": 2 + } + }, + { + "name": "C_DOUBLE", + "dataType": { + "type": "DOUBLE" + } + }, + { + "name": "C_VARCHAR_UTF8_1", + "dataType": { + "type": "VARCHAR", + "size": 10000, + "characterSet": "UTF8" + } + }, + { + "name": "C_VARCHAR_UTF8_2", + "dataType": { + "type": "VARCHAR", + "size": 10000 + } + }, + { + "name": "C_VARCHAR_ASCII", + "dataType": { + "type": "VARCHAR", + "size": 10000, + "characterSet": "ASCII" + } + }, + { + "name": "C_CHAR_UTF8_1", + "dataType": { + "type": "CHAR", + "size": 3 + } + }, + { + "name": "C_CHAR_UTF8_2", + "dataType": { + "type": "CHAR", + "size": 3, + "characterSet": "UTF8" + } + }, + { + "name": "C_CHAR_ASCII", + "dataType": { + "type": "CHAR", + "size": 3, + "characterSet": "ASCII" + } + }, + { + "name": "C_DATE", + "dataType": { + "type": "DATE" + } + }, + { + "name": "C_TIMESTAMP_1", + "dataType": { + "type": "TIMESTAMP" + } + }, + { + "name": "C_TIMESTAMP_2", + "dataType": { + "type": "TIMESTAMP", + "withLocalTimeZone": false + } + }, + { + "name": "C_TIMESTAMP_3", + "dataType": { + "type": "TIMESTAMP", + "withLocalTimeZone": true + } + }, + { + "name": "C_BOOLEAN", + "dataType": { + "type": "BOOLEAN" + } + }, + { + "name": "C_GEOMETRY", + "dataType": { + "type": "GEOMETRY", + "srid": 1 + } + }, + { + "name": "C_INTERVAL_DS_1", + "dataType": { + "type": "INTERVAL", + "fromTo": "DAY TO SECONDS" + } + }, + { + "name": "C_INTERVAL_DS_2", + "dataType": { + "type": "INTERVAL", + "fromTo": "DAY TO SECONDS", + "precision": 3, + "fraction": 4 + } + }, + { + "name": "C_INTERVAL_YM_1", + "dataType": { + "type": "INTERVAL", + "fromTo": "YEAR TO MONTH" + } + }, + { + "name": "C_INTERVAL_YM_2", + "dataType": { + "type": "INTERVAL", + "fromTo": "YEAR TO MONTH", + "precision": 3 + } + } + ] + } + ], + "schemaMetadataInfo": { + "name": "VS", + "adapterNotes": "", + "properties": { + } + } +} diff --git a/jdbc-adapter/virtualschema-common/src/test/resources/set_properties_request.json b/jdbc-adapter/virtualschema-common/src/test/resources/set_properties_request.json new file mode 100644 index 
000000000..243fed96a --- /dev/null +++ b/jdbc-adapter/virtualschema-common/src/test/resources/set_properties_request.json @@ -0,0 +1,16 @@ +{ + "properties": { + "EXISTING_PROP_1": "New Value", + "EXISTING_PROP_2": null, + "NEW_PROP": "VAL2", + "DELETED_PROP_NON_EXISTING": null + }, + "type": "setProperties", + "schemaMetadataInfo": { + "name": "VS", + "properties": { + "EXISTING_PROP_1": "Old Value 1", + "EXISTING_PROP_2": "Old Value 2" + } + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter-dist/dependency-reduced-pom.xml b/jdbc-adapter/virtualschema-jdbc-adapter-dist/dependency-reduced-pom.xml new file mode 100644 index 000000000..bf214c873 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter-dist/dependency-reduced-pom.xml @@ -0,0 +1,59 @@ + + + + virtualschema-jdbc-adapter-main + com.exasol + 0.0.1-SNAPSHOT + + 4.0.0 + virtualschema-jdbc-adapter-dist + + + + maven-assembly-plugin + 2.4.1 + + + job + + single + + + + + + src/main/assembly/all-dependencies.xml + + + + + maven-shade-plugin + 2.4.3 + + + package + + shade + + + + + + + + + + junit + junit + 4.11 + test + + + hamcrest-core + org.hamcrest + + + + + + diff --git a/jdbc-adapter/virtualschema-jdbc-adapter-dist/pom.xml b/jdbc-adapter/virtualschema-jdbc-adapter-dist/pom.xml new file mode 100644 index 000000000..82d581b31 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter-dist/pom.xml @@ -0,0 +1,74 @@ + + 4.0.0 + + + com.exasol + virtualschema-jdbc-adapter-main + 0.0.1-SNAPSHOT + + virtualschema-jdbc-adapter-dist + + + + + + + com.exasol + virtualschema-common + 0.0.1-SNAPSHOT + + + com.exasol + virtualschema-jdbc-adapter + 0.0.1-SNAPSHOT + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 2.4.1 + + + src/main/assembly/all-dependencies.xml + + + + + job + + + single + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.3 + + + package + + shade + + + + + + + + + + + diff --git a/jdbc-adapter/virtualschema-jdbc-adapter-dist/src/main/assembly/all-dependencies.xml b/jdbc-adapter/virtualschema-jdbc-adapter-dist/src/main/assembly/all-dependencies.xml new file mode 100644 index 000000000..51b86372b --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter-dist/src/main/assembly/all-dependencies.xml @@ -0,0 +1,39 @@ + + all-dependencies + + jar + + false + + + + metaInf-services + + + + + + true + runtime + / + + + + + + + + + diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/pom.xml b/jdbc-adapter/virtualschema-jdbc-adapter/pom.xml new file mode 100644 index 000000000..b04d6b465 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/pom.xml @@ -0,0 +1,124 @@ + + 4.0.0 + + + com.exasol + virtualschema-jdbc-adapter-main + 0.0.1-SNAPSHOT + + virtualschema-jdbc-adapter + + + 2.19.1 + + + + + it + + + + + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + ${surefire.and.failsafe.plugin.version} + + + **/*IT.java + + + + + failsafe-integration-tests + integration-test + + integration-test + verify + + + + + verify + verify + + verify + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.5.0 + + + pre-integration-test + + exec + + + test + java + + -classpath + + + com.exasol.adapter.dialects.IntegrationTestSetup + ${project.version} + ${integrationtest.configfile} + + + + + + + + + + + + + com.exasol + virtualschema-common + 0.0.1-SNAPSHOT + + + + org.yaml + snakeyaml + 1.17 + integration-test + + + + + com.exasol + exasol-jdbc + 6.0-SNAPSHOT + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + ${surefire.and.failsafe.plugin.version} + + + + + diff --git 
a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/AbstractSqlDialect.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/AbstractSqlDialect.java new file mode 100644 index 000000000..24eb90f28 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/AbstractSqlDialect.java @@ -0,0 +1,253 @@ +package com.exasol.adapter.dialects; + +import com.exasol.adapter.jdbc.ColumnAdapterNotes; +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.sql.AggregateFunction; +import com.exasol.adapter.sql.ScalarFunction; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Types; +import java.util.*; + +/** + * Abstract implementation of a dialect. We recommend that every dialect should extend this abstract class. + * + * TODO Find solution to handle unsupported types (e.g. exceeding varchar size). E.g. skip column or always truncate or add const-null column or throw error or make configurable + */ +public abstract class AbstractSqlDialect implements SqlDialect { + + protected Set omitParenthesesMap = new HashSet<>(); + + private SqlDialectContext context; + + public AbstractSqlDialect(SqlDialectContext context) { + this.context = context; + } + + @Override + public String getTableCatalogAndSchemaSeparator() { + return "."; + } + + @Override + public MappedTable mapTable(ResultSet tables) throws SQLException { +// for (int i=1; i<=tables.getMetaData().getColumnCount(); ++i) { +// System.out.println(" - " + tables.getMetaData().getColumnName(i) + ": " + tables.getString(i)); +// } + String commentString = tables.getString("REMARKS"); + if (commentString == null) { + commentString = ""; + } + String tableName = changeIdentifierCaseIfNeeded(tables.getString("TABLE_NAME")); + return MappedTable.createMappedTable(tableName, commentString); + } + + @Override + public ColumnMetadata mapColumn(ResultSet columns) throws SQLException { + String colName = changeIdentifierCaseIfNeeded(columns.getString("COLUMN_NAME")); + int jdbcType = columns.getInt("DATA_TYPE"); + // Check if dialect want's to handle this row + DataType colType = mapJdbcType(columns); + if (colType == null) { + colType = defaultJdbcTypeMapping(columns); + } + + // Nullable + boolean isNullable = true; + String nullable = columns.getString("IS_NULLABLE"); + if (nullable != null && nullable.toLowerCase().equals("no")) { + isNullable = false; + } + + // Identity + boolean isIdentity = false; + try { + String identity = columns.getString("IS_AUTOINCREMENT"); + if (identity != null && identity.toLowerCase().equals("yes")) { + isIdentity = true; + } + } catch (SQLException ex) { + //ignore me --some older JDBC drivers (Java 1.5) don't support IS_AUTOINCREMENT + } + + // Default + String defaultString = columns.getString("COLUMN_DEF"); + String defaultValue = ""; + if (defaultString != null) { + defaultValue = defaultString; + } + + // Comment + String comment = ""; + String commentString = columns.getString("REMARKS"); + if (commentString != null && !commentString.isEmpty()) { + comment = commentString; + } + + // Column type + String columnTypeName = columns.getString("TYPE_NAME"); + if (columnTypeName == null) + columnTypeName = ""; + String adapterNotes = ColumnAdapterNotes.serialize(new ColumnAdapterNotes(jdbcType, columnTypeName));; + return new ColumnMetadata(colName, adapterNotes, colType, isNullable, isIdentity, defaultValue, comment); + } + 
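+ /**
+ * Fallback mapping from standard JDBC types to EXASOL types. This is only consulted by {@link #mapColumn(ResultSet)} when the dialect's {@link #mapJdbcType(ResultSet)} returns null for the current column.
+ */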
+ private static DataType defaultJdbcTypeMapping(ResultSet cols) throws SQLException { + + DataType colType; + int jdbcType = cols.getInt("DATA_TYPE"); + switch (jdbcType) { + case Types.TINYINT: + case Types.SMALLINT: + case Types.INTEGER: + case Types.BIGINT: // Java type long + int intPrec = cols.getInt("COLUMN_SIZE"); + if (intPrec <= DataType.maxExasolDecimalPrecision) { + colType = DataType.createDecimal(intPrec, 0); + } else { + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + } + break; + case Types.DECIMAL: + int decimalPrec = cols.getInt("COLUMN_SIZE"); + int decimalScale = cols.getInt("DECIMAL_DIGITS"); + if (decimalPrec <= DataType.maxExasolDecimalPrecision) { + colType = DataType.createDecimal(decimalPrec, decimalScale); + } else { + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + } + break; + case Types.NUMERIC: // Java BigInteger + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + break; + case Types.REAL: + case Types.FLOAT: + case Types.DOUBLE: + colType = DataType.createDouble(); + break; + case Types.VARCHAR: + case Types.NVARCHAR: + case Types.LONGVARCHAR: + case Types.LONGNVARCHAR: { + int size = cols.getInt("COLUMN_SIZE"); + DataType.ExaCharset charset = (cols.getInt("CHAR_OCTET_LENGTH") == size) ? DataType.ExaCharset.ASCII : DataType.ExaCharset.UTF8; + if (size <= DataType.maxExasolVarcharSize) { + colType = DataType.createVarChar(size, charset); + } else { + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, charset); + } + break; + } + case Types.CHAR: + case Types.NCHAR: { + int size = cols.getInt("COLUMN_SIZE"); + DataType.ExaCharset charset = (cols.getInt("CHAR_OCTET_LENGTH") == size) ? DataType.ExaCharset.ASCII : DataType.ExaCharset.UTF8; + if (size <= DataType.maxExasolCharSize) { + colType = DataType.createChar(size, charset); + } else { + if (size <= DataType.maxExasolVarcharSize) { + colType = DataType.createVarChar(size, charset); + } else { + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, charset); + } + } + break; + } + case Types.DATE: + colType = DataType.createDate(); + break; + case Types.TIMESTAMP: + colType = DataType.createTimestamp(false); + break; + case Types.TIME: + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + break; + case Types.BIT: + case Types.BOOLEAN: + colType = DataType.createBool(); + break; + case Types.BINARY: + case Types.VARBINARY: + case Types.LONGVARBINARY: + case Types.BLOB: + case Types.CLOB: + case Types.NCLOB: + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + break; + case Types.OTHER: + case Types.JAVA_OBJECT: + case Types.DISTINCT: + case Types.STRUCT: + case Types.ARRAY: + case Types.REF: + case Types.DATALINK: + case Types.SQLXML: + case Types.NULL: + default: + throw new RuntimeException("Unsupported data type (" + jdbcType + ") found in source system, should never happen"); + } + assert(colType != null); + return colType; + } + + private String changeIdentifierCaseIfNeeded(String identifier) { + if (getQuotedIdentifierHandling() == getUnquotedIdentifierHandling()) { + if (getQuotedIdentifierHandling() != IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE) { + // Completely case-insensitive. 
We can store everything uppercase to allow working with unquoted identifiers in EXASOL + return identifier.toUpperCase(); + } + } + return identifier; + } + + @Override + public boolean omitParentheses(ScalarFunction function) { + return omitParenthesesMap.contains(function); + } + + @Override + public SqlGenerationVisitor getSqlGenerationVisitor(SqlGenerationContext context) { + return new SqlGenerationVisitor(this, context); + } + + @Override + public DataType mapJdbcType(ResultSet cols) throws SQLException { + return null; + } + + @Override + public Map getScalarFunctionAliases() { + return new EnumMap<>(ScalarFunction.class); + } + + @Override + public Map getAggregateFunctionAliases() { + Map aliases = new HashMap<>(); + aliases.put(AggregateFunction.GEO_INTERSECTION_AGGREGATE, "ST_INTERSECTION"); + aliases.put(AggregateFunction.GEO_UNION_AGGREGATE, "ST_UNION"); + return aliases; + } + + @Override + public Map getBinaryInfixFunctionAliases() { + Map aliases = new HashMap<>(); + aliases.put(ScalarFunction.ADD, "+"); + aliases.put(ScalarFunction.SUB, "-"); + aliases.put(ScalarFunction.MULT, "*"); + aliases.put(ScalarFunction.FLOAT_DIV, "/"); + return aliases; + } + + @Override + public Map getPrefixFunctionAliases() { + Map aliases = new HashMap<>(); + aliases.put(ScalarFunction.NEG, "-"); + return aliases; + } + + public SqlDialectContext getContext() { + return context; + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialect.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialect.java new file mode 100644 index 000000000..711b0d1b1 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialect.java @@ -0,0 +1,252 @@ +package com.exasol.adapter.dialects; + +import com.exasol.adapter.capabilities.Capabilities; +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.sql.AggregateFunction; +import com.exasol.adapter.sql.ScalarFunction; + +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Map; + +/** + * Interface for the implementation of a SQL dialect. All data source specific logic is specified here. + * + *

+ * The responsibilities of a dialect can be divided into three areas: + *
+ * 1. Capabilities:
+ * The dialect defines the set of supported capabilities. See {@link #getCapabilities()} for details. + *
+ * 2. Data Type Mapping:
+ * The dialect defines how the tables in the data source are mapped to EXASOL virtual tables. In particular, the data types have to be mapped to EXASOL data types. See {@link #mapJdbcType(ResultSet)} for details. + *
+ * 3. SQL Generation:
+ * The dialect defines how to generate SQL statements in the data source syntax. The dialect provides several methods to customize quoting, case-sensitivity, function name aliases, and other aspects of the syntax. + *
+ * The actual SQL generation is done by the separate class {@link SqlGenerationVisitor} (it uses the visitor pattern). For things like quoting and case-sensitivity, the SQL generation visitor will ask the dialect how to handle them. + *
+ * If your dialect has a special SQL syntax which cannot be realized using the methods of {@link SqlDialect}, you can implement your own SQL generation visitor which extends {@link SqlGenerationVisitor}. Your custom visitor must then be returned by {@link #getSqlGenerationVisitor(SqlGenerationContext)}. For an example, look at {@link com.exasol.adapter.dialects.impl.OracleSqlGenerationVisitor}. + *
+ * Notes for developing a dialect:
+ * - Create a class for your integration test, with the suffix IT.java.
+ * - We recommend extending the abstract class {@link AbstractSqlDialect} instead of directly implementing {@link SqlDialect}.
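+ *
+ * A minimal sketch of what a new dialect can look like (hypothetical {@code MyDialect}; shown only to illustrate the areas above, not a complete implementation):
+ * <pre>
+ * public class MyDialect extends AbstractSqlDialect {
+ *     public static final String NAME = "MYDIALECT";
+ *
+ *     public MyDialect(SqlDialectContext context) {
+ *         super(context);
+ *     }
+ *
+ *     public String getPublicName() {
+ *         return NAME;
+ *     }
+ *
+ *     public Capabilities getCapabilities() {
+ *         Capabilities cap = new Capabilities(); // an empty set means: nothing is pushed down
+ *         cap.supportAllCapabilities();          // coarse-grained; real dialects register individual capabilities
+ *         return cap;
+ *     }
+ *
+ *     // ... plus the remaining methods declared below: catalog/schema support,
+ *     // identifier case handling, quoting, null sorting, and string literals.
+ * }
+ * </pre>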

+ */ +public interface SqlDialect { + + /** + * @return the name that can be used to choose this dialect (user can give this name). Case insensitive. + */ + String getPublicName(); + + // + // CAPABILITIES + // + + /** + * @return The set of capabilities supported by this SQL-Dialect + */ + Capabilities getCapabilities(); + + // + // MAPPING OF METADATA: CATALOGS, SCHEMAS, TABLES AND DATA TYPES + // + + enum SchemaOrCatalogSupport { + SUPPORTED, + UNSUPPORTED, + UNKNOWN + } + + /** + * @return True, if the database "truly" supports the concept of JDBC catalogs (not just a single dummy catalog). If true, the user must specify the catalog. + * False, if the database does not have a catalog concept, e.g. if it has no catalogs, or a single dummy catalog, or even if it throws an Exception for {@link DatabaseMetaData#getCatalogs()}. If false, the user must not specify the catalog. + */ + SchemaOrCatalogSupport supportsJdbcCatalogs(); + + /** + * @return True, if the database "truly" supports the concept of JDBC schemas (not just a single dummy schema). If true, the user must specify the schema. + * False, if the database does not have a schema concept, e.g. if it has no schemas, or a single dummy schemas, or even if it throws an Exception for {@link DatabaseMetaData#getSchemas()}. If false, the user must not specify the schema. + */ + SchemaOrCatalogSupport supportsJdbcSchemas(); + + class MappedTable { + private boolean isIgnored = false; + private String tableName = ""; + private String tableComment = ""; + public static MappedTable createMappedTable(String tableName, String tableComment) { + MappedTable t = new MappedTable(); + t.isIgnored = false; + t.tableName = tableName; + t.tableComment = tableComment; + return t; + } + public static MappedTable createIgnoredTable() { + MappedTable t = new MappedTable(); + t.isIgnored = true; + return t; + } + public boolean isIgnored() { return isIgnored; } + public String getTableName() { return tableName; } + public String getTableComment() { return tableComment; } + } + + /** + * @param tables A jdbc Resultset for the {@link DatabaseMetaData#getTables(String, String, String, String[])} call, pointing to the current table. + * @return An instance of {@link MappedTable} describing the mapped table. + */ + MappedTable mapTable(ResultSet tables) throws SQLException; + + /** + * @param columns A jdbc Resultset for the {@link DatabaseMetaData#getColumns(String, String, String, String)} call, pointing to the current column. + * @return The mapped column + * @throws SQLException + */ + ColumnMetadata mapColumn(ResultSet columns) throws SQLException; + + /** + * Maps the jdbc datatype information of a column to the EXASOL datatype of the column. + * The dialect can also return null, so that the default mapping occurs. + * This method will be called by {@link #mapColumn(ResultSet)} in the default implementation. + * + * @param cols A jdbc Resultset for the {@link DatabaseMetaData#getColumns(String, String, String, String)} call, pointing to the current column. + * @return Either null, if the default datatype mapping shall be applied, + * or the datatype which the current column shall be mapped to. + * This datatype will be used as the datatype in the virtual table. 
+ */ + DataType mapJdbcType(ResultSet cols) throws SQLException; + + // + // SQL GENERATION + // + + /** + * How unquoted or quoted identifiers in queries or DDLs are handled + */ + enum IdentifierCaseHandling { + INTERPRET_AS_LOWER, + INTERPRET_AS_UPPER, + INTERPRET_CASE_SENSITIVE + } + + /** + * @return How to handle case sensitivity of unquoted identifiers + */ + IdentifierCaseHandling getUnquotedIdentifierHandling(); + + /** + * @return How to handle case sensitivity of quoted identifiers + */ + IdentifierCaseHandling getQuotedIdentifierHandling(); + + /** + * @param identifier The name of an identifier (table or column). If identifiers are case sensitive, the identifier must be passed case-sensitive of course. + * @return the quoted identifier, also if quoting is not required + */ + String applyQuote(String identifier); + + /** + * @param identifier The name of an identifier (table or column). + * @return the quoted identifier, if this name requires quoting, or the unquoted identifier, if no quoting is required. + */ + String applyQuoteIfNeeded(String identifier); + + /** + * @return True if table names must be catalog-qualified, e.g. SELECT * FROM MY_CATALOG.MY_TABLE, otherwise false. + * Can be combined with {@link #requiresSchemaQualifiedTableNames(SqlGenerationContext)} + */ + boolean requiresCatalogQualifiedTableNames(SqlGenerationContext context); + + /** + * @return True if table names must be schema-qualified, e.g. SELECT * FROM MY_SCHEMA.MY_TABLE, otherwise false. + * Can be combined with {@link #requiresCatalogQualifiedTableNames(SqlGenerationContext)} + */ + boolean requiresSchemaQualifiedTableNames(SqlGenerationContext context); + + /** + * @return String that is used to separate the catalog and/or the schema from the tablename. In many cases this is a dot. + */ + String getTableCatalogAndSchemaSeparator(); + + enum NullSorting { + // NULL values are sorted at the end regardless of sort order + NULLS_SORTED_AT_END, + + // NULL values are sorted at the start regardless of sort order + NULLS_SORTED_AT_START, + + // NULL values are sorted high + NULLS_SORTED_HIGH, + + // NULL values are sorted low + NULLS_SORTED_LOW + } + + /** + * @return The behavior how nulls are sorted in an ORDER BY. If the null sorting behavior is + * not {@link NullSorting#NULLS_SORTED_AT_END} and your dialects has the order by + * capability but you cannot explicitly specify NULLS FIRST or NULLS LAST, then you must + * overwrite the SQL generation to somehow obtain the desired semantic. + */ + NullSorting getDefaultNullSorting(); + + /** + * @param value a string literal value + * @return the string literal in valid SQL syntax, e.g. "value" becomes "'value'". This might include escaping + */ + String getStringLiteral(String value); + + /** + * @return aliases for scalar functions. To be defined for each function that has the same semantic but a different name in the data source. + * If an alias for the same function is defined in {@link #getBinaryInfixFunctionAliases()}, than the infix alias will be ignored. + */ + Map getScalarFunctionAliases(); + + /** + * @return Defines which binary scalar functions should be treated infix and how. E.g. a map entry ("ADD", "+") causes a function call "ADD(1,2)" to be written as "1 + 2". + */ + Map getBinaryInfixFunctionAliases(); + + /** + * @return Defines which unary scalar functions should be treated prefix and how. E.g. a map entry ("NEG", "-") causes a function call "NEG(2)" to be written as "-2". 
+ */ + Map getPrefixFunctionAliases(); + + /** + * @return aliases for aggregate functions. To be defined for each function that has the same semantic but a different name in the data source. + */ + Map getAggregateFunctionAliases(); + + /** + * @return Returns true for functions with zero arguments if they do not require parentheses (e.g. SYSDATE). + */ + boolean omitParentheses(ScalarFunction function); + + /** + * Returns the Visitor to be used for SQL generation. + * Use this only if you need to, i.e. if you have requirements which cannot + * be realized via the other methods provided by {@link SqlDialect}. + * + * @param context context information for the sql generation visitor + * @return the SqlGenerationVisitor to be used for this dialect + */ + SqlGenerationVisitor getSqlGenerationVisitor(SqlGenerationContext context); +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialectContext.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialectContext.java new file mode 100644 index 000000000..cd829b4d9 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialectContext.java @@ -0,0 +1,19 @@ +package com.exasol.adapter.dialects; + +import com.exasol.adapter.jdbc.SchemaAdapterNotes; + +/** + * Context information required by {@link SqlDialect} + */ +public class SqlDialectContext { + + private SchemaAdapterNotes schemaAdapterNotes; + + public SqlDialectContext(SchemaAdapterNotes schemaAdapterNotes) { + this.schemaAdapterNotes = schemaAdapterNotes; + } + + public SchemaAdapterNotes getSchemaAdapterNotes() { + return schemaAdapterNotes; + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialects.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialects.java new file mode 100644 index 000000000..a3e34731e --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlDialects.java @@ -0,0 +1,63 @@ +package com.exasol.adapter.dialects; + +import com.exasol.adapter.dialects.impl.*; + +import java.util.List; + +/** + * Manages a set of supported SqlDialects. 
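+ * For example (hypothetical setup code, assuming Guava's {@code ImmutableList}, which is already used in this module):
+ * <pre>
+ * SqlDialects dialects = new SqlDialects(ImmutableList.of(GenericSqlDialect.NAME, ExasolSqlDialect.NAME));
+ * SqlDialect dialect = dialects.getDialectByName("exasol", context); // lookup is case-insensitive
+ * </pre>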
+ */ +public class SqlDialects { + + private List supportedDialects; + + private List> dialects; + + public SqlDialects(List supportedDialects) { + this.supportedDialects = supportedDialects; + } + + public boolean isSupported(String dialectName) { + for (String curName : supportedDialects) { + if (curName.equalsIgnoreCase(dialectName)) { + return true; + } + } + return false; + } + + public SqlDialect getDialectByName(String name, SqlDialectContext context) { + if (name.equalsIgnoreCase(GenericSqlDialect.NAME)) { + return new GenericSqlDialect(context); + } else if (name.equalsIgnoreCase(ExasolSqlDialect.NAME)) { + return new ExasolSqlDialect(context); + } else if (name.equalsIgnoreCase(HiveSqlDialect.NAME)) { + return new HiveSqlDialect(context); + } else if (name.equalsIgnoreCase(ImpalaSqlDialect.NAME)) { + return new ImpalaSqlDialect(context); + } else if (name.equalsIgnoreCase(MysqlSqlDialect.NAME)) { + return new MysqlSqlDialect(context); + } else if (name.equalsIgnoreCase(OracleSqlDialect.NAME)) { + return new OracleSqlDialect(context); + } else { + return null; + } + } + + public List> getDialects() { + return dialects; + } + + public String getDialectsString() { + StringBuilder builder = new StringBuilder(); + boolean first = true; + for (String curName : supportedDialects) { + if (!first) { + builder.append(", "); + } + builder.append(curName); + first = false; + } + return builder.toString(); + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlGenerationContext.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlGenerationContext.java new file mode 100644 index 000000000..164d355df --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlGenerationContext.java @@ -0,0 +1,32 @@ +package com.exasol.adapter.dialects; + +/** + * Context information needed during SQL generation. These information are not dialect specific. + * + * Contains information that are globally available during sql generation, but not part of the SqlNode graph. + */ +public class SqlGenerationContext { + + private String catalogName; + private String schemaName; + private boolean isLocal; + + public SqlGenerationContext(String catalogName, String schemaName, boolean isLocal) { + this.catalogName = catalogName; + this.schemaName = schemaName; + this.isLocal = isLocal; + } + + public String getCatalogName() { + return catalogName; + } + + public String getSchemaName() { + return schemaName; + } + + public boolean isLocal() { + return isLocal; + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlGenerationVisitor.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlGenerationVisitor.java new file mode 100644 index 000000000..b63e65710 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/SqlGenerationVisitor.java @@ -0,0 +1,471 @@ +package com.exasol.adapter.dialects; + +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.sql.*; +import com.google.common.base.Joiner; + +import java.util.ArrayList; +import java.util.List; + +/** + * This class has the logic to generate SQL queries based on a graph of {@link SqlNode} elements. + * It uses the visitor pattern. + * This class interacts with the dialects in some situations, e.g. to find out how to handle quoting, + * case-sensitivity. + * + *

+ * If this class is not sufficiently customizable for your use case, you can extend it and override the required methods. Your custom visitor class must then be returned by {@link SqlDialect#getSqlGenerationVisitor(SqlGenerationContext)}. See {@link com.exasol.adapter.dialects.impl.OracleSqlGenerationVisitor} for an example. + *
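+ * Each {@code visit} method returns the SQL string for one node type; nested expressions are rendered by calling {@code accept(this)} on the sons of the node, so the whole statement is assembled recursively.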

+ * + * Note on operator associativity and parenthesis generation: Currently we use + * parenthesis almost always. Without parenthesis, two SqlNode graphs with different + * semantic lead to "select 1 = 1 - 1 + 1". Also "SELECT NOT NOT TRUE" needs to be written + * as "SELECT NOT (NOT TRUE)" to work at all, whereas SELECT NOT TRUE works fine + * without parentheses. Currently we make inflationary use of parenthesis to to enforce + * the right semantic, but hopefully there is a better way. + */ +public class SqlGenerationVisitor implements SqlNodeVisitor { + + private SqlDialect dialect; + private SqlGenerationContext context; + + public SqlGenerationVisitor(SqlDialect dialect, SqlGenerationContext context) { + this.dialect = dialect; + this.context = context; + + checkDialectAliases(); + } + + private void checkDialectAliases() { + // Check if dialect provided invalid aliases, which would never be applied. + for (ScalarFunction function : dialect.getScalarFunctionAliases().keySet()) { + if (!function.isSimple()) { + throw new RuntimeException("The dialect " + dialect.getPublicName() + " provided an alias for the non-simple scalar function " + function.name() + ". This alias will never be considered."); + } + } + for (AggregateFunction function : dialect.getAggregateFunctionAliases().keySet()) { + if (!function.isSimple()) { + throw new RuntimeException("The dialect " + dialect.getPublicName() + " provided an alias for the non-simple aggregate function " + function.name() + ". This alias will never be considered."); + } + } + } + + @Override + public String visit(SqlStatementSelect select) { + StringBuilder sql = new StringBuilder(); + sql.append("SELECT "); + sql.append(select.getSelectList().accept(this)); + sql.append(" FROM "); + sql.append(select.getFromClause().accept(this)); + if (select.hasFilter()) { + sql.append(" WHERE "); + sql.append(select.getWhereClause().accept(this)); + } + if (select.hasGroupBy()) { + sql.append(" GROUP BY "); + sql.append(select.getGroupBy().accept(this)); + } + if (select.hasHaving()) { + sql.append(" HAVING "); + sql.append(select.getHaving().accept(this)); + } + if (select.hasOrderBy()) { + sql.append(" "); + sql.append(select.getOrderBy().accept(this)); + } + if (select.hasLimit()) { + sql.append(" "); + sql.append(select.getLimit().accept(this)); + } + return sql.toString(); + } + + @Override + public String visit(SqlSelectList selectList) { + List selectElement = new ArrayList<>(); + if (selectList.isRequestAnyColumn()) { + // The system requested any column + selectElement.add("true"); + } else if (selectList.isSelectStar()) { + selectElement.add("*"); + } else { + for (SqlNode node : selectList.getSons()) { + selectElement.add(node.accept(this)); + } + } + return Joiner.on(", ").join(selectElement); + } + + @Override + public String visit(SqlColumn column) { + return dialect.applyQuoteIfNeeded(column.getName()); + } + + @Override + public String visit(SqlTable table) { + String schemaPrefix = ""; + if (dialect.requiresCatalogQualifiedTableNames(context) && context.getCatalogName() != null && !context.getCatalogName().isEmpty()) { + schemaPrefix = dialect.applyQuoteIfNeeded(context.getCatalogName()) + + dialect.getTableCatalogAndSchemaSeparator(); + } + if (dialect.requiresSchemaQualifiedTableNames(context) && context.getSchemaName() != null && !context.getSchemaName().isEmpty()) { + schemaPrefix += dialect.applyQuoteIfNeeded(context.getSchemaName()) + + dialect.getTableCatalogAndSchemaSeparator(); + } + return schemaPrefix + 
dialect.applyQuoteIfNeeded(table.getName()); + } + + @Override + public String visit(SqlGroupBy groupBy) { + if (groupBy.getSons().isEmpty()) { + throw new RuntimeException( + "Unexpected internal state (empty group by)"); + } + List selectElement = new ArrayList<>(); + for (SqlNode node : groupBy.getSons()) { + selectElement.add(node.accept(this)); + } + return Joiner.on(", ").join(selectElement); + } + + @Override + public String visit(SqlFunctionAggregate function) { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + if (function.getFunctionName().equalsIgnoreCase("count") && argumentsSql.size() == 0) { + argumentsSql.add("*"); + } + String distinctSql = ""; + if (function.hasDistinct()) { + distinctSql = "DISTINCT "; + } + String functionNameInSourceSystem = function.getFunctionName(); + if (dialect.getAggregateFunctionAliases().containsKey(function.getFunction())) { + functionNameInSourceSystem = dialect.getAggregateFunctionAliases().get(function.getFunction()); + } + return functionNameInSourceSystem + "(" + distinctSql + + Joiner.on(", ").join(argumentsSql) + ")"; + } + + @Override + public String visit(SqlFunctionAggregateGroupConcat function) { + StringBuilder builder = new StringBuilder(); + builder.append(function.getFunctionName()); + builder.append("("); + if (function.hasDistinct()) { + builder.append("DISTINCT "); + } + builder.append(function.getConcatExpression().accept(this)); + if (function.getOrderByExpressions().size() > 0) { + builder.append(" ORDER BY "); + for (int i = 0; i < function.getOrderByExpressions().size(); i++) { + if (i > 0) { + builder.append(", "); + } + builder.append(function.getOrderByExpressions().get(i).accept(this)); + boolean shallNullsBeAtTheEnd = !function.getNullsFirstOrderList().get(i); + boolean isAscending = function.getAscendingOrderList().get(i); + if (isAscending == false) { + builder.append(" DESC"); + } + if (shallNullsBeAtTheEnd != nullsAreAtEndByDefault(isAscending, dialect.getDefaultNullSorting())) { + // we have to specify null positioning explicitly, otherwise it would be wrong + builder.append(shallNullsBeAtTheEnd ? 
" NULLS LAST" : " NULLS FIRST"); + } + } + } + if (function.getSeparator() != null) { + builder.append(" SEPARATOR "); + builder.append("'"); + builder.append(function.getSeparator()); + builder.append("'"); + } + builder.append(")"); + return builder.toString(); + } + + @Override + public String visit(SqlFunctionScalar function) { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + String functionNameInSourceSystem = function.getFunctionName(); + if (dialect.getScalarFunctionAliases().containsKey(function.getFunction())) { + // Take alias if one is defined - will overwrite the infix + functionNameInSourceSystem = dialect.getScalarFunctionAliases().get(function.getFunction()); + } else { + if (dialect.getBinaryInfixFunctionAliases().containsKey(function.getFunction())) { + assert (argumentsSql.size() == 2); + String realFunctionName = function.getFunctionName(); + if (dialect.getBinaryInfixFunctionAliases().containsKey(function.getFunction())) { + realFunctionName = dialect.getBinaryInfixFunctionAliases().get(function.getFunction()); + } + return "(" + argumentsSql.get(0) + " " + realFunctionName + " " + + argumentsSql.get(1) + ")"; + } else if (dialect.getPrefixFunctionAliases().containsKey(function.getFunction())) { + assert (argumentsSql.size() == 1); + String realFunctionName = function.getFunctionName(); + if (dialect.getPrefixFunctionAliases().containsKey(function.getFunction())) { + realFunctionName = dialect.getPrefixFunctionAliases().get(function.getFunction()); + } + return "(" + realFunctionName + + argumentsSql.get(0) + ")"; + } + } + if (argumentsSql.size() == 0 && dialect.omitParentheses(function.getFunction())) { + return functionNameInSourceSystem; + } else { + return functionNameInSourceSystem + "(" + Joiner.on(", ").join(argumentsSql) + ")"; + } + } + + @Override + public String visit(SqlFunctionScalarCase function) { + StringBuilder builder = new StringBuilder(); + builder.append("CASE"); + if (function.getBasis() != null) { + builder.append(" "); + builder.append(function.getBasis().accept(this)); + } + for (int i = 0; i < function.getArguments().size(); i++) { + SqlNode node = function.getArguments().get(i); + SqlNode result = function.getResults().get(i); + builder.append(" WHEN "); + builder.append(node.accept(this)); + builder.append(" THEN "); + builder.append(result.accept(this)); + } + if (function.getResults().size() > function.getArguments().size()) { + builder.append(" ELSE "); + builder.append(function.getResults().get(function.getResults().size() - 1).accept(this)); + } + builder.append(" END"); + return builder.toString(); + } + + @Override + public String visit(SqlFunctionScalarCast function) { + StringBuilder builder = new StringBuilder(); + builder.append("CAST"); + builder.append("("); + builder.append(function.getExpression().accept(this)); + builder.append(" AS "); + builder.append(function.getDataType()); + builder.append(")"); + return builder.toString(); + } + + @Override + public String visit(SqlFunctionScalarExtract function) { + String expression = function.getExpression().accept(this); + return function.getFunctionName() + "(" + function.getDateTime() + " FROM "+ expression + ")"; + } + + @Override + public String visit(SqlLimit limit) { + String offsetSql = ""; + if (limit.getOffset() != 0) { + offsetSql = " OFFSET " + limit.getOffset(); + } + return "LIMIT " + limit.getLimit() + offsetSql; + } + + @Override + public String visit(SqlLiteralBool literal) { + if 
(literal.getValue()) { + return "true"; + } else { + return "false"; + } + } + + @Override + public String visit(SqlLiteralDate literal) { + return "DATE '" + literal.getValue() + "'"; // This gets always executed + // as + // TO_DATE('2015-02-01','YYYY-MM-DD') + } + + @Override + public String visit(SqlLiteralDouble literal) { + return Double.toString(literal.getValue()); + } + + @Override + public String visit(SqlLiteralExactnumeric literal) { + return literal.getValue().toString(); + } + + @Override + public String visit(SqlLiteralNull literal) { + return "NULL"; + } + + @Override + public String visit(SqlLiteralString literal) { + return dialect.getStringLiteral(literal.getValue()); + } + + @Override + public String visit(SqlLiteralTimestamp literal) { + // TODO Allow dialect to modify behavior + return "TIMESTAMP '" + literal.getValue().toString() + "'"; + } + + @Override + public String visit(SqlLiteralTimestampUtc literal) { + // TODO Allow dialect to modify behavior + return "TIMESTAMP '" + literal.getValue().toString() + "'"; + } + + @Override + public String visit(SqlLiteralInterval literal) { + // TODO Allow dialect to modify behavior + if (literal.getDataType().getIntervalType() == DataType.IntervalType.YEAR_TO_MONTH) { + return "INTERVAL '" + literal.getValue().toString() + + "' YEAR (" + literal.getDataType().getPrecision() + ") TO MONTH"; + } else { + return "INTERVAL '" + literal.getValue().toString() + "' DAY (" + literal.getDataType().getPrecision() + + ") TO SECOND (" + literal.getDataType().getIntervalFraction() + ")"; + } + } + + @Override + public String visit(SqlOrderBy orderBy) { + // ORDER BY [ASC/DESC] [NULLS FIRST/LAST] + // ASC and NULLS LAST are default in EXASOL + List sqlOrderElement = new ArrayList<>(); + for (int i = 0; i < orderBy.getSons().size(); ++i) { + String elementSql = orderBy.getSon(i).accept(this); + boolean shallNullsBeAtTheEnd = orderBy.nullsLast().get(i); + boolean isAscending = orderBy.isAscending().get(i); + if (isAscending == false) { + elementSql += " DESC"; + } + if (shallNullsBeAtTheEnd != nullsAreAtEndByDefault(isAscending, dialect.getDefaultNullSorting())) { + // we have to specify null positioning explicitly, otherwise it would be wrong + elementSql += (shallNullsBeAtTheEnd) ? " NULLS LAST" : " NULLS FIRST"; + } + sqlOrderElement.add(elementSql); + } + return "ORDER BY " + Joiner.on(", ").join(sqlOrderElement); + } + + /** + * @param isAscending true if the desired sort order is ascending, false if descending + * @param defaultNullSorting default null sorting of dialect + * @return true, if the data source would position nulls at end of the resultset if NULLS FIRST/LAST is not specified explicitly. 
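+ * For example, with {@link SqlDialect.NullSorting#NULLS_SORTED_HIGH} this returns true for an ascending and false for a descending sort order.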
+ */ + private boolean nullsAreAtEndByDefault(boolean isAscending, SqlDialect.NullSorting defaultNullSorting) { + if (defaultNullSorting == SqlDialect.NullSorting.NULLS_SORTED_AT_END) { + return true; + } else if (defaultNullSorting == SqlDialect.NullSorting.NULLS_SORTED_AT_START) { + return false; + } else { + if (isAscending) { + return (defaultNullSorting == SqlDialect.NullSorting.NULLS_SORTED_HIGH); + } else { + return !(defaultNullSorting == SqlDialect.NullSorting.NULLS_SORTED_HIGH); + } + } + } + + @Override + public String visit(SqlPredicateAnd predicate) { + List operandsSql = new ArrayList<>(); + for (SqlNode node : predicate.getSons()) { + operandsSql.add(node.accept(this)); + } + return "(" + Joiner.on(" AND ").join(operandsSql) + ")"; + } + + @Override + public String visit(SqlPredicateBetween predicate) { + return predicate.getExpression().accept(this) + " BETWEEN " + + predicate.getBetweenLeft().accept(this) + " AND " + + predicate.getBetweenRight().accept(this); + } + + @Override + public String visit(SqlPredicateEqual predicate) { + return predicate.getLeft().accept(this) + " = " + + predicate.getRight().accept(this); + } + + @Override + public String visit(SqlPredicateInConstList predicate) { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : predicate.getInArguments()) { + argumentsSql.add(node.accept(this)); + } + return predicate.getExpression().accept(this) + " IN (" + + Joiner.on(", ").join(argumentsSql) + ")"; + } + + @Override + public String visit(SqlPredicateLess predicate) { + return predicate.getLeft().accept(this) + " < " + + predicate.getRight().accept(this); + } + + @Override + public String visit(SqlPredicateLessEqual predicate) { + return predicate.getLeft().accept(this) + " <= " + + predicate.getRight().accept(this); + } + + @Override + public String visit(SqlPredicateLike predicate) { + String sql = predicate.getLeft().accept(this) + " LIKE " + + predicate.getPattern().accept(this); + if (predicate.getEscapeChar() != null) { + sql += " ESCAPE " + predicate.getEscapeChar().accept(this); + } + return sql; + } + + @Override + public String visit(SqlPredicateLikeRegexp predicate) { + return predicate.getLeft().accept(this) + " REGEXP_LIKE " + + predicate.getPattern().accept(this); + } + + @Override + public String visit(SqlPredicateNot predicate) { + // "SELECT NOT NOT TRUE" is invalid syntax, "SELECT NOT (NOT TRUE)" works. 
+ return "NOT (" + predicate.getExpression().accept(this) + ")"; + } + + @Override + public String visit(SqlPredicateNotEqual predicate) { + return predicate.getLeft().accept(this) + " != " + + predicate.getRight().accept(this); + } + + @Override + public String visit(SqlPredicateOr predicate) { + List operandsSql = new ArrayList<>(); + for (SqlNode node : predicate.getSons()) { + operandsSql.add(node.accept(this)); + } + return "(" + Joiner.on(" OR ").join(operandsSql) + ")"; + } + + @Override + public String visit(SqlPredicateIsNull predicate) { + return predicate.getExpression().accept(this) + " IS NULL"; + } + + @Override + public String visit(SqlPredicateIsNotNull predicate) { + return predicate.getExpression().accept(this) + " IS NOT NULL"; + + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ExasolSqlDialect.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ExasolSqlDialect.java new file mode 100644 index 000000000..509959353 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ExasolSqlDialect.java @@ -0,0 +1,132 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.capabilities.Capabilities; +import com.exasol.adapter.dialects.AbstractSqlDialect; +import com.exasol.adapter.dialects.SqlDialectContext; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.sql.ScalarFunction; + +import java.sql.ResultSet; +import java.sql.SQLException; + +/** + * This class is work-in-progress + * + * TODO The precision of interval type columns is hardcoded, because it cannot be retrieved via JDBC. Should be retrieved from system table.
+ * TODO The srid of geometry type columns is hardcoded, because it cannot be retrieved via JDBC. It should be retrieved from a system table instead.
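+ * Illustrative consequence (an observation from mapJdbcType below, not a documented guarantee): every geometry column is currently reported as GEOMETRY(3857), even if the stored data uses a different srid.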
+ */ +public class ExasolSqlDialect extends AbstractSqlDialect { + + public ExasolSqlDialect(SqlDialectContext context) { + super(context); + omitParenthesesMap.add(ScalarFunction.SYSDATE); + omitParenthesesMap.add(ScalarFunction.SYSTIMESTAMP); + omitParenthesesMap.add(ScalarFunction.CURRENT_SCHEMA); + omitParenthesesMap.add(ScalarFunction.CURRENT_SESSION); + omitParenthesesMap.add(ScalarFunction.CURRENT_STATEMENT); + omitParenthesesMap.add(ScalarFunction.CURRENT_USER); + } + + public static final String NAME = "EXASOL"; + + public String getPublicName() { + return NAME; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcCatalogs() { + return SchemaOrCatalogSupport.UNSUPPORTED; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcSchemas() { + return SchemaOrCatalogSupport.SUPPORTED; + } + + @Override + public DataType mapJdbcType(ResultSet cols) throws SQLException { + DataType colType = null; + int jdbcType = cols.getInt("DATA_TYPE"); + switch (jdbcType) { + case -104: + // Currently precision is hardcoded, because we cannot retrieve it via EXASOL jdbc driver. + colType = DataType.createIntervalDaySecond(2,3); + break; + case -103: + // Currently precision is hardcoded, because we cannot retrieve it via EXASOL jdbc driver. + colType = DataType.createIntervalYearMonth(2); + break; + case 123: + // Currently srid is hardcoded, because we cannot retrieve it via EXASOL jdbc driver. + colType = DataType.createGeometry(3857); + break; + case 124: + colType = DataType.createTimestamp(true); + break; + } + return colType; + } + + @Override + public Capabilities getCapabilities() { + // Supports all capabilities + Capabilities cap = new Capabilities(); + cap.supportAllCapabilities(); + return cap; + } + + @Override + public IdentifierCaseHandling getUnquotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_AS_UPPER; + } + + @Override + public IdentifierCaseHandling getQuotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE; + } + + @Override + public String applyQuote(String identifier) { + // If identifier contains double quotation marks ", it needs to be espaced by another double quotation mark. E.g. "a""b" is the identifier a"b in the db. + return "\"" + identifier.replace("\"", "\"\"") + "\""; + } + + @Override + public String applyQuoteIfNeeded(String identifier) { + // Quoted identifiers can contain any unicode char except dot (.). + // This is a simplified rule, which might cause that some identifiers are quoted although not needed + boolean isSimpleIdentifier = identifier.matches("^[A-Z][0-9A-Z_]*"); + if (isSimpleIdentifier) { + return identifier; + } else { + return applyQuote(identifier); + } + } + + @Override + public boolean requiresCatalogQualifiedTableNames(SqlGenerationContext context) { + return false; + } + + @Override + public boolean requiresSchemaQualifiedTableNames(SqlGenerationContext context) { + // We need schema qualifiers a) if we are in IS_LOCAL mode, i.e. 
we run statements directly in a subselect without IMPORT FROM JDBC + // and b) if we don't have the schema in the jdbc connection string (like "jdbc:exa:localhost:5555;schema=native") + return true; + // return context.isLocal(); + } + + @Override + public NullSorting getDefaultNullSorting() { + assert(getContext().getSchemaAdapterNotes().isNullsAreSortedHigh()); + return NullSorting.NULLS_SORTED_HIGH; + } + + @Override + public String getStringLiteral(String value) { + // Don't forget to escape single quote + return "'" + value.replace("'", "''") + "'"; + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/GenericSqlDialect.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/GenericSqlDialect.java new file mode 100644 index 000000000..f804e1015 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/GenericSqlDialect.java @@ -0,0 +1,125 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.capabilities.Capabilities; +import com.exasol.adapter.dialects.AbstractSqlDialect; +import com.exasol.adapter.dialects.SqlDialectContext; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.jdbc.SchemaAdapterNotes; + +/** + * This dialect can be used for data sources where a custom dialect implementation does not yet exists. + * It will obtain all information from the JDBC Metadata. + */ +public class GenericSqlDialect extends AbstractSqlDialect { + + public GenericSqlDialect(SqlDialectContext context) { + super(context); + } + + public static final String NAME = "GENERIC"; + + public String getPublicName() { + return NAME; + } + + @Override + public Capabilities getCapabilities() { + Capabilities cap = new Capabilities(); + return cap; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcCatalogs() { + return SchemaOrCatalogSupport.UNKNOWN; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcSchemas() { + return SchemaOrCatalogSupport.UNKNOWN; + } + + @Override + public IdentifierCaseHandling getUnquotedIdentifierHandling() { + SchemaAdapterNotes adapterNotes = getContext().getSchemaAdapterNotes(); + if (adapterNotes.isSupportsMixedCaseIdentifiers()) { + // Unquoted identifiers are treated case-sensitive and stored mixed case + return IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE; + } else { + if (adapterNotes.isStoresLowerCaseIdentifiers()) { + return IdentifierCaseHandling.INTERPRET_AS_LOWER; + } else if (adapterNotes.isStoresUpperCaseIdentifiers()) { + return IdentifierCaseHandling.INTERPRET_AS_UPPER; + } else if (adapterNotes.isStoresMixedCaseIdentifiers()) { + // This case is a bit strange - case insensitive, but still stores it mixed case + return IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE; + } else { + throw new RuntimeException("Unexpected quote behavior. 
Adapternotes: " + SchemaAdapterNotes.serialize(adapterNotes)); + } + } + } + + @Override + public IdentifierCaseHandling getQuotedIdentifierHandling() { + SchemaAdapterNotes adapterNotes = getContext().getSchemaAdapterNotes(); + if (adapterNotes.isSupportsMixedCaseQuotedIdentifiers()) { + // Quoted identifiers are treated case-sensitive and stored mixed case + return IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE; + } else { + if (adapterNotes.isStoresLowerCaseQuotedIdentifiers()) { + return IdentifierCaseHandling.INTERPRET_AS_LOWER; + } else if (adapterNotes.isStoresUpperCaseQuotedIdentifiers()) { + return IdentifierCaseHandling.INTERPRET_AS_UPPER; + } else if (adapterNotes.isStoresMixedCaseQuotedIdentifiers()) { + // This case is a bit strange - case insensitive, but still stores it mixed case + return IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE; + } else { + throw new RuntimeException("Unexpected quote behavior. Adapternotes: " + SchemaAdapterNotes.serialize(adapterNotes)); + } + } + } + + @Override + public String applyQuote(String identifier) { + String quoteString = getContext().getSchemaAdapterNotes().getIdentifierQuoteString(); + return quoteString + identifier + quoteString; + } + + @Override + public String applyQuoteIfNeeded(String identifier) { + // We could consider getExtraNameCharacters() here as well to do less quoting + return applyQuote(identifier); + } + + @Override + public boolean requiresCatalogQualifiedTableNames(SqlGenerationContext context) { + return true; + } + + @Override + public boolean requiresSchemaQualifiedTableNames(SqlGenerationContext context) { + // See getCatalogSeparator(): String that this database uses as the separator between a catalog and table name. + // See isCatalogAtStart(): whether a catalog appears at the start of a fully qualified table name + return true; + } + + @Override + public NullSorting getDefaultNullSorting() { + SchemaAdapterNotes notes = getContext().getSchemaAdapterNotes(); + if (notes.isNullsAreSortedAtEnd()) { + return NullSorting.NULLS_SORTED_AT_END; + } else if (notes.isNullsAreSortedAtStart()) { + return NullSorting.NULLS_SORTED_AT_START; + } else if (notes.isNullsAreSortedLow()) { + return NullSorting.NULLS_SORTED_LOW; + } else { + assert (notes.isNullsAreSortedHigh()); + return NullSorting.NULLS_SORTED_HIGH; + } + } + + @Override + public String getStringLiteral(String value) { + return "'" + value.replace("'", "''") + "'"; + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/HiveSqlDialect.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/HiveSqlDialect.java new file mode 100644 index 000000000..140e2ae45 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/HiveSqlDialect.java @@ -0,0 +1,99 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.capabilities.Capabilities; +import com.exasol.adapter.dialects.AbstractSqlDialect; +import com.exasol.adapter.dialects.SqlDialectContext; +import com.exasol.adapter.dialects.SqlGenerationContext; + +/** + * Dialect for Hive, using the Cloudera Hive JDBC Driver/Connector (developed by Simba). 
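+ * Identifiers are quoted with back-ticks (see applyQuote below) and unquoted identifiers are interpreted as lower case.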
+ * + * TODO Finish implementation of this dialect and add as a supported dialect + */ +public class HiveSqlDialect extends AbstractSqlDialect { + + public HiveSqlDialect(SqlDialectContext context) { + super(context); + } + + public static final String NAME = "HIVE"; + + public String getPublicName() { + return NAME; + } + + @Override + public Capabilities getCapabilities() { + Capabilities cap = new Capabilities(); + return cap; + } + + /** + * Quote from user manual "The Cloudera JDBC Driver for Apache Hive supports both catalogs and schemas to make it easy for + * the driver to work with various JDBC applications. Since Hive only organizes tables into + * schemas/databases, the driver provides a synthetic catalog called “HIVE” under which all of the + * schemas/databases are organized. The driver also maps the JDBC schema to the Hive + * schema/database." + */ + @Override + public SchemaOrCatalogSupport supportsJdbcCatalogs() { + return SchemaOrCatalogSupport.UNSUPPORTED; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcSchemas() { + return SchemaOrCatalogSupport.SUPPORTED; + } + + @Override + public IdentifierCaseHandling getUnquotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_AS_LOWER; + } + + @Override + public IdentifierCaseHandling getQuotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_AS_LOWER; + } + + @Override + public String applyQuote(String identifier) { + // If identifier contains double quotation marks ", it needs to be escaped by another double quotation mark. E.g. "a""b" is the identifier a"b in the db. + return "`" + identifier + "`"; + } + + @Override + public String applyQuoteIfNeeded(String identifier) { + // We need to apply quotes only in case of reserved keywords. Since we don't know these (could look up in JDBC Metadata...) we always quote. + return applyQuote(identifier); + } + + @Override + public boolean requiresCatalogQualifiedTableNames(SqlGenerationContext context) { + return false; + } + + @Override + public boolean requiresSchemaQualifiedTableNames(SqlGenerationContext context) { + // We need schema qualifiers a) if we are in IS_LOCAL mode, i.e. we run statements directly in a subselect without IMPORT FROM JDBC + // and b) if we don't have the schema in the jdbc connection string (like "jdbc:exa:localhost:5555;schema=native") + return true; + // return context.isLocal(); + } + + @Override + public NullSorting getDefaultNullSorting() { + // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+SortBy + // In Hive 2.1.0 and later, specifying the null sorting order for each of + // the columns in the "order by" clause is supported. The default null sorting + // order for ASC order is NULLS FIRST, while the default null sorting order for + // DESC order is NULLS LAST. 
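+        // NULLS FIRST for ASC means NULL behaves like the smallest value, which is what NULLS_SORTED_LOW expresses.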
+ return NullSorting.NULLS_SORTED_LOW; + } + + @Override + public String getStringLiteral(String value) { + // Don't forget to escape single quote + return "'" + value.replace("'", "''") + "'"; + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ImpalaSqlDialect.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ImpalaSqlDialect.java new file mode 100644 index 000000000..0aa9a1f13 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ImpalaSqlDialect.java @@ -0,0 +1,162 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.capabilities.*; +import com.exasol.adapter.dialects.AbstractSqlDialect; +import com.exasol.adapter.dialects.SqlDialectContext; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.dialects.SqlGenerationVisitor; + +/** + * Dialect for Impala, using the Cloudera Impala JDBC Driver/Connector (developed by Simba). + * + * See http://www.cloudera.com/documentation/enterprise/latest/topics/impala_langref.html + */ +public class ImpalaSqlDialect extends AbstractSqlDialect { + + public ImpalaSqlDialect(SqlDialectContext context) { + super(context); + } + + public static final String NAME = "IMPALA"; + + public String getPublicName() { + return NAME; + } + + @Override + public Capabilities getCapabilities() { + // Main capabilities + Capabilities cap = new Capabilities(); + cap.supportMainCapability(MainCapability.SELECTLIST_PROJECTION); + cap.supportMainCapability(MainCapability.SELECTLIST_EXPRESSIONS); + cap.supportMainCapability(MainCapability.FILTER_EXPRESSIONS); + cap.supportMainCapability(MainCapability.AGGREGATE_SINGLE_GROUP); + cap.supportMainCapability(MainCapability.AGGREGATE_GROUP_BY_COLUMN); + cap.supportMainCapability(MainCapability.AGGREGATE_GROUP_BY_EXPRESSION); + cap.supportMainCapability(MainCapability.AGGREGATE_GROUP_BY_TUPLE); + cap.supportMainCapability(MainCapability.AGGREGATE_HAVING); + cap.supportMainCapability(MainCapability.ORDER_BY_COLUMN); + cap.supportMainCapability(MainCapability.ORDER_BY_EXPRESSION); + cap.supportMainCapability(MainCapability.LIMIT); + cap.supportMainCapability(MainCapability.LIMIT_WITH_OFFSET); + + // Literals + cap.supportLiteral(LiteralCapability.STRING); + cap.supportLiteral(LiteralCapability.BOOL); + cap.supportLiteral(LiteralCapability.EXACTNUMERIC); + cap.supportLiteral(LiteralCapability.DOUBLE); + cap.supportLiteral(LiteralCapability.NULL); + // TODO Implement timestamp literal + + // Predicates + cap.supportPredicate(PredicateCapability.AND); + cap.supportPredicate(PredicateCapability.OR); + cap.supportPredicate(PredicateCapability.NOT); + cap.supportPredicate(PredicateCapability.EQUAL); + cap.supportPredicate(PredicateCapability.NOTEQUAL); + cap.supportPredicate(PredicateCapability.LESS); + cap.supportPredicate(PredicateCapability.LESSEQUAL); + cap.supportPredicate(PredicateCapability.LIKE); + // LIKE_ESCAPE is not supported + cap.supportPredicate(PredicateCapability.REGEXP_LIKE); + cap.supportPredicate(PredicateCapability.BETWEEN); + cap.supportPredicate(PredicateCapability.IN_CONSTLIST); + cap.supportPredicate(PredicateCapability.IS_NULL); + cap.supportPredicate(PredicateCapability.IS_NOT_NULL); + + // Aggregate Functions + // Unsupported by EXASOL: APPX_MEDIAN (approximate median) + // With Alias: NDV (approximate count distinct) + cap.supportAggregateFunction(AggregateFunctionCapability.AVG); + 
cap.supportAggregateFunction(AggregateFunctionCapability.COUNT); + cap.supportAggregateFunction(AggregateFunctionCapability.COUNT_STAR); + cap.supportAggregateFunction(AggregateFunctionCapability.COUNT_DISTINCT); + // GROUP_CONCAT with DISTINCT not supported + cap.supportAggregateFunction(AggregateFunctionCapability.GROUP_CONCAT); + cap.supportAggregateFunction(AggregateFunctionCapability.GROUP_CONCAT_SEPARATOR); + cap.supportAggregateFunction(AggregateFunctionCapability.MAX); + cap.supportAggregateFunction(AggregateFunctionCapability.MIN); + cap.supportAggregateFunction(AggregateFunctionCapability.SUM); + cap.supportAggregateFunction(AggregateFunctionCapability.SUM_DISTINCT); + + // TODO Scalar Functions + + return cap; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcCatalogs() { + return SchemaOrCatalogSupport.UNSUPPORTED; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcSchemas() { + return SchemaOrCatalogSupport.SUPPORTED; + } + + /** + * Note from Impala documentation: Impala identifiers are always + * case-insensitive. That is, tables named t1 and T1 always refer to the + * same table, regardless of quote characters. Internally, Impala always + * folds all specified table and column names to lowercase. This is why the + * column headers in query output are always displayed in lowercase. + */ + @Override + public IdentifierCaseHandling getUnquotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_AS_LOWER; + } + + @Override + public IdentifierCaseHandling getQuotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_AS_LOWER; + } + + @Override + public String applyQuote(String identifier) { + // If identifier contains double quotation marks ", it needs to be espaced by another double quotation mark. E.g. "a""b" is the identifier a"b in the db. + return "`" + identifier + "`"; + } + + @Override + public String applyQuoteIfNeeded(String identifier) { + // We need to apply quotes only in case of reserved keywords. Since we don't know these (could look up in JDBC Metadata...) we always quote. + return applyQuote(identifier); + } + + @Override + public boolean requiresCatalogQualifiedTableNames(SqlGenerationContext context) { + return false; + } + + @Override + public boolean requiresSchemaQualifiedTableNames(SqlGenerationContext context) { + // We need schema qualifiers a) if we are in IS_LOCAL mode, i.e. we run statements directly in a subselect without IMPORT FROM JDBC + // and b) if we don't have the schema in the jdbc connection string (like "jdbc:exa:localhost:5555;schema=native") + return true; + // return context.isLocal(); + } + + @Override + public SqlGenerationVisitor getSqlGenerationVisitor(SqlGenerationContext context) { + return new ImpalaSqlGenerationVisitor(this, context); + } + + @Override + public NullSorting getDefaultNullSorting() { + // In Impala 1.2.1 and higher, all NULL values come at the end of the result set for ORDER BY ... ASC queries, + // and at the beginning of the result set for ORDER BY ... DESC queries. + // In effect, NULL is considered greater than all other values for sorting purposes. + // The original Impala behavior always put NULL values at the end, even for ORDER BY ... DESC queries. + // The new behavior in Impala 1.2.1 makes Impala more compatible with other popular database systems. + // In Impala 1.2.1 and higher, you can override or specify the sorting behavior for NULL by adding the clause + // NULLS FIRST or NULLS LAST at the end of the ORDER BY clause. 
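+        // "NULL is considered greater than all other values" corresponds exactly to NULLS_SORTED_HIGH.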
+ return NullSorting.NULLS_SORTED_HIGH; + } + + @Override + public String getStringLiteral(String value) { + // Don't forget to escape single quote + return "'" + value.replace("'", "''") + "'"; + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ImpalaSqlGenerationVisitor.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ImpalaSqlGenerationVisitor.java new file mode 100644 index 000000000..eee4625df --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/ImpalaSqlGenerationVisitor.java @@ -0,0 +1,72 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.dialects.SqlDialect; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.dialects.SqlGenerationVisitor; +import com.exasol.adapter.sql.*; +import com.google.common.base.Joiner; + +import java.util.ArrayList; +import java.util.List; + +public class ImpalaSqlGenerationVisitor extends SqlGenerationVisitor { + + SqlDialect dialect; + + public ImpalaSqlGenerationVisitor(SqlDialect dialect, SqlGenerationContext context) { + super(dialect, context); + this.dialect = dialect; + } + + @Override + public String visit(SqlPredicateLikeRegexp predicate) { + return predicate.getLeft().accept(this) + " REGEXP " + + predicate.getPattern().accept(this); + } + + @Override + public String visit(SqlFunctionAggregateGroupConcat function) { + // Note that GROUP_CONCAT with DISTINCT is not supported by Impala + StringBuilder builder = new StringBuilder(); + builder.append(function.getFunctionName()); + builder.append("("); + // To use it group_concat with numeric values we would need to sync group_concat(cast(x as string)). Since we cannot compute the type, we always cast + builder.append("CAST("); + builder.append(function.getConcatExpression().accept(this)); + builder.append(" AS STRING)"); + if (function.getSeparator() != null) { + builder.append(", "); + builder.append("'"); + builder.append(function.getSeparator()); + builder.append("'"); + } + builder.append(")"); + return builder.toString(); + } + + @Override + public String visit(SqlFunctionAggregate function) { + boolean isDirectlyInSelectList = (function.hasParent() && function.getParent().getType() == SqlNodeType.SELECT_LIST); + if (function.getFunction() != AggregateFunction.SUM || !isDirectlyInSelectList) { + return super.visit(function); + } else { + // For SUM, the JDBC driver returns type DOUBLE in prepared statement but the actual + // query returns DECIMAL in ResultSetMetadata, so that IMPORT fails. Casting to DOUBLE + // solves the problem. 
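+            // Illustrative rewrite (column name assumed): SUM(X) in the select list becomes CAST(SUM(X) AS DOUBLE).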
+ List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + String distinctSql = ""; + if (function.hasDistinct()) { + distinctSql = "DISTINCT "; + } + String functionNameInSourceSystem = function.getFunctionName(); + if (dialect.getAggregateFunctionAliases().containsKey(function.getFunction())) { + functionNameInSourceSystem = dialect.getAggregateFunctionAliases().get(function.getFunction()); + } + return "CAST(" + functionNameInSourceSystem + "(" + distinctSql + + Joiner.on(", ").join(argumentsSql) + ") AS DOUBLE)"; + } + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/MysqlSqlDialect.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/MysqlSqlDialect.java new file mode 100644 index 000000000..86f4573e4 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/MysqlSqlDialect.java @@ -0,0 +1,86 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.capabilities.Capabilities; +import com.exasol.adapter.dialects.AbstractSqlDialect; +import com.exasol.adapter.dialects.SqlDialectContext; +import com.exasol.adapter.dialects.SqlGenerationContext; + +/** + * Dialect for MySQL using the MySQL Connector jdbc driver. + * + * TODO Finish implementation of this dialect and add as a supported dialect + */ +public class MysqlSqlDialect extends AbstractSqlDialect { + + public MysqlSqlDialect(SqlDialectContext context) { + super(context); + } + + public static final String NAME = "MYSQL"; + + public String getPublicName() { + return NAME; + } + + @Override + public Capabilities getCapabilities() { + Capabilities cap = new Capabilities(); + return cap; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcCatalogs() { + return SchemaOrCatalogSupport.SUPPORTED; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcSchemas() { + return SchemaOrCatalogSupport.UNSUPPORTED; + } + + @Override + public IdentifierCaseHandling getUnquotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_AS_UPPER; + } + + @Override + public IdentifierCaseHandling getQuotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE; + } + + @Override + public String applyQuote(String identifier) { + // TODO ANSI_QUOTES option. Must be obtained from JDBC DatabaseMetadata. 
http://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sqlmode_ansi_quotes + CharSequence quoteChar = "`"; + return quoteChar + identifier.replace(quoteChar, quoteChar + "" + quoteChar) + quoteChar; + } + + @Override + public String applyQuoteIfNeeded(String identifier) { + return applyQuote(identifier); + } + + @Override + public boolean requiresCatalogQualifiedTableNames(SqlGenerationContext context) { + return true; + } + + @Override + public boolean requiresSchemaQualifiedTableNames(SqlGenerationContext context) { + return false; + } + + @Override + public NullSorting getDefaultNullSorting() { + // See http://stackoverflow.com/questions/2051602/mysql-orderby-a-number-nulls-last + // and also http://stackoverflow.com/questions/9307613/mysql-order-by-null-first-and-desc-after + assert(getContext().getSchemaAdapterNotes().isNullsAreSortedLow()); + return NullSorting.NULLS_SORTED_LOW; + } + + @Override + public String getStringLiteral(String value) { + return "'" + value.replace("'", "''") + "'"; + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/OracleSqlDialect.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/OracleSqlDialect.java new file mode 100644 index 000000000..fa9cd53b2 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/OracleSqlDialect.java @@ -0,0 +1,431 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.capabilities.*; +import com.exasol.adapter.dialects.AbstractSqlDialect; +import com.exasol.adapter.dialects.SqlDialectContext; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.dialects.SqlGenerationVisitor; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.sql.AggregateFunction; +import com.exasol.adapter.sql.ScalarFunction; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Types; +import java.util.EnumMap; +import java.util.Map; + +/** + * Work in Progress + */ +public class OracleSqlDialect extends AbstractSqlDialect { + + private boolean castAggFuncToFloat = true; + private boolean castScalarFuncToFloat = true; + + public OracleSqlDialect(SqlDialectContext context) { + super(context); + omitParenthesesMap.add(ScalarFunction.SYSDATE); + omitParenthesesMap.add(ScalarFunction.SYSTIMESTAMP); + } + + public static final String NAME = "ORACLE"; + + public String getPublicName() { + return NAME; + } + + public boolean getCastAggFuncToFloat() { + return castAggFuncToFloat; + } + + public boolean getCastScalarFuncToFloat() { + return castScalarFuncToFloat; + } + + @Override + public Capabilities getCapabilities() { + Capabilities cap = new Capabilities(); + + // Capabilities + cap.supportMainCapability(MainCapability.SELECTLIST_PROJECTION); + cap.supportMainCapability(MainCapability.SELECTLIST_EXPRESSIONS); + cap.supportMainCapability(MainCapability.FILTER_EXPRESSIONS); + cap.supportMainCapability(MainCapability.AGGREGATE_SINGLE_GROUP); + cap.supportMainCapability(MainCapability.AGGREGATE_GROUP_BY_COLUMN); + cap.supportMainCapability(MainCapability.AGGREGATE_GROUP_BY_EXPRESSION); + cap.supportMainCapability(MainCapability.AGGREGATE_GROUP_BY_TUPLE); + cap.supportMainCapability(MainCapability.AGGREGATE_HAVING); + cap.supportMainCapability(MainCapability.ORDER_BY_COLUMN); + cap.supportMainCapability(MainCapability.ORDER_BY_EXPRESSION); + cap.supportMainCapability(MainCapability.LIMIT); + 
cap.supportMainCapability(MainCapability.LIMIT_WITH_OFFSET); + + // Predicates + cap.supportPredicate(PredicateCapability.AND); + cap.supportPredicate(PredicateCapability.OR); + cap.supportPredicate(PredicateCapability.NOT); + cap.supportPredicate(PredicateCapability.EQUAL); + cap.supportPredicate(PredicateCapability.NOTEQUAL); + cap.supportPredicate(PredicateCapability.LESS); + cap.supportPredicate(PredicateCapability.LESSEQUAL); + cap.supportPredicate(PredicateCapability.LIKE); + cap.supportPredicate(PredicateCapability.LIKE_ESCAPE); + cap.supportPredicate(PredicateCapability.REGEXP_LIKE); + cap.supportPredicate(PredicateCapability.BETWEEN); + cap.supportPredicate(PredicateCapability.IN_CONSTLIST); + cap.supportPredicate(PredicateCapability.IS_NULL); + cap.supportPredicate(PredicateCapability.IS_NOT_NULL); + + // Literals + // BOOL is not supported + cap.supportLiteral(LiteralCapability.NULL); + cap.supportLiteral(LiteralCapability.DATE); + cap.supportLiteral(LiteralCapability.TIMESTAMP); + cap.supportLiteral(LiteralCapability.TIMESTAMP_UTC); + cap.supportLiteral(LiteralCapability.DOUBLE); + cap.supportLiteral(LiteralCapability.EXACTNUMERIC); + cap.supportLiteral(LiteralCapability.STRING); + cap.supportLiteral(LiteralCapability.INTERVAL); + + // Aggregate functions + cap.supportAggregateFunction(AggregateFunctionCapability.COUNT); + cap.supportAggregateFunction(AggregateFunctionCapability.COUNT_STAR); + cap.supportAggregateFunction(AggregateFunctionCapability.COUNT_DISTINCT); + cap.supportAggregateFunction(AggregateFunctionCapability.GROUP_CONCAT); + // GROUP_CONCAT_DISTINCT is not supported + cap.supportAggregateFunction(AggregateFunctionCapability.GROUP_CONCAT_SEPARATOR); + cap.supportAggregateFunction(AggregateFunctionCapability.GROUP_CONCAT_ORDER_BY); + // GEO_INTERSECTION_AGGREGATE is not supported + // GEO_UNION_AGGREGATE is not supported + // APPROXIMATE_COUNT_DISTINCT supported with version >= 12.1.0.2 + if (castAggFuncToFloat) { + // Cast result to FLOAT because result set precision = 0, scale = 0 + cap.supportAggregateFunction(AggregateFunctionCapability.SUM); + cap.supportAggregateFunction(AggregateFunctionCapability.SUM_DISTINCT); + cap.supportAggregateFunction(AggregateFunctionCapability.MIN); + cap.supportAggregateFunction(AggregateFunctionCapability.MAX); + cap.supportAggregateFunction(AggregateFunctionCapability.AVG); + cap.supportAggregateFunction(AggregateFunctionCapability.AVG_DISTINCT); + cap.supportAggregateFunction(AggregateFunctionCapability.MEDIAN); + cap.supportAggregateFunction(AggregateFunctionCapability.FIRST_VALUE); + cap.supportAggregateFunction(AggregateFunctionCapability.LAST_VALUE); + cap.supportAggregateFunction(AggregateFunctionCapability.STDDEV); + cap.supportAggregateFunction(AggregateFunctionCapability.STDDEV_DISTINCT); + cap.supportAggregateFunction(AggregateFunctionCapability.STDDEV_POP); + // STDDEV_POP_DISTINCT + cap.supportAggregateFunction(AggregateFunctionCapability.STDDEV_SAMP); + // STDDEV_SAMP_DISTINCT + cap.supportAggregateFunction(AggregateFunctionCapability.VARIANCE); + cap.supportAggregateFunction(AggregateFunctionCapability.VARIANCE_DISTINCT); + cap.supportAggregateFunction(AggregateFunctionCapability.VAR_POP); + // VAR_POP_DISTINCT + cap.supportAggregateFunction(AggregateFunctionCapability.VAR_SAMP); + // VAR_SAMP_DISTINCT + } + + // Scalar functions + cap.supportScalarFunction(ScalarFunctionCapability.CEIL); + cap.supportScalarFunction(ScalarFunctionCapability.DIV); + cap.supportScalarFunction(ScalarFunctionCapability.FLOOR); + // 
ROUND is not supported. DATETIME could be pushed down, NUMBER would have to be rounded. + cap.supportScalarFunction(ScalarFunctionCapability.SIGN); + // TRUNC is not supported. DATETIME could be pushed down, NUMBER would have to be rounded. + if (castScalarFuncToFloat) { + // Cast result to FLOAT because result set precision = 0, scale = 0 + cap.supportScalarFunction(ScalarFunctionCapability.ADD); + cap.supportScalarFunction(ScalarFunctionCapability.SUB); + cap.supportScalarFunction(ScalarFunctionCapability.MULT); + cap.supportScalarFunction(ScalarFunctionCapability.FLOAT_DIV); + cap.supportScalarFunction(ScalarFunctionCapability.NEG); + cap.supportScalarFunction(ScalarFunctionCapability.ABS); + cap.supportScalarFunction(ScalarFunctionCapability.ACOS); + cap.supportScalarFunction(ScalarFunctionCapability.ASIN); + cap.supportScalarFunction(ScalarFunctionCapability.ATAN); + cap.supportScalarFunction(ScalarFunctionCapability.ATAN2); + cap.supportScalarFunction(ScalarFunctionCapability.COS); + cap.supportScalarFunction(ScalarFunctionCapability.COSH); + cap.supportScalarFunction(ScalarFunctionCapability.COT); + cap.supportScalarFunction(ScalarFunctionCapability.DEGREES); + cap.supportScalarFunction(ScalarFunctionCapability.EXP); + cap.supportScalarFunction(ScalarFunctionCapability.GREATEST); + cap.supportScalarFunction(ScalarFunctionCapability.LEAST); + cap.supportScalarFunction(ScalarFunctionCapability.LN); + cap.supportScalarFunction(ScalarFunctionCapability.LOG); + cap.supportScalarFunction(ScalarFunctionCapability.MOD); + cap.supportScalarFunction(ScalarFunctionCapability.POWER); + cap.supportScalarFunction(ScalarFunctionCapability.RADIANS); + // RAND is not supported (constant arguments in EXA, will not be pushed down) + cap.supportScalarFunction(ScalarFunctionCapability.SIN); + cap.supportScalarFunction(ScalarFunctionCapability.SINH); + cap.supportScalarFunction(ScalarFunctionCapability.SQRT); + cap.supportScalarFunction(ScalarFunctionCapability.TAN); + cap.supportScalarFunction(ScalarFunctionCapability.TANH); + } + cap.supportScalarFunction(ScalarFunctionCapability.ASCII); + // BIT_LENGTH is not supported. Can be different for Unicode characters. + cap.supportScalarFunction(ScalarFunctionCapability.CHR); + // COLOGNE_PHONETIC is not supported. + // CONCAT is not supported. Number of arguments can be different. + // DUMP is not supported. Output is different. + // EDIT_DISTANCE is not supported. Output is different. UTL_MATCH.EDIT_DISTANCE returns -1 with NULL argument. + // INSERT is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.INSTR); + cap.supportScalarFunction(ScalarFunctionCapability.LENGTH); + cap.supportScalarFunction(ScalarFunctionCapability.LOCATE); + cap.supportScalarFunction(ScalarFunctionCapability.LOWER); + cap.supportScalarFunction(ScalarFunctionCapability.LPAD); + cap.supportScalarFunction(ScalarFunctionCapability.LTRIM); + // OCTET_LENGTH is not supported. Can be different for Unicode characters. + cap.supportScalarFunction(ScalarFunctionCapability.REGEXP_INSTR); + cap.supportScalarFunction(ScalarFunctionCapability.REGEXP_REPLACE); + cap.supportScalarFunction(ScalarFunctionCapability.REGEXP_SUBSTR); + cap.supportScalarFunction(ScalarFunctionCapability.REPEAT); + cap.supportScalarFunction(ScalarFunctionCapability.REPLACE); + cap.supportScalarFunction(ScalarFunctionCapability.REVERSE); + // RIGHT is not supported. Possible solution with SUBSTRING (must handle corner cases correctly). 
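+        // (Sketch only, not implemented: RIGHT(s, n) could roughly be emulated as SUBSTR(s, -n),
+        // but n = 0, n > LENGTH(s) and NULL arguments would need special handling, so the capability stays off.)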
+ cap.supportScalarFunction(ScalarFunctionCapability.RPAD); + cap.supportScalarFunction(ScalarFunctionCapability.RTRIM); + cap.supportScalarFunction(ScalarFunctionCapability.SOUNDEX); + // SPACE is not supported. Parameter = 0 has different results from RPAD. + cap.supportScalarFunction(ScalarFunctionCapability.SUBSTR); + cap.supportScalarFunction(ScalarFunctionCapability.TRANSLATE); + cap.supportScalarFunction(ScalarFunctionCapability.TRIM); + // UNICODE is not supported. + // UNICODECHR is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.UPPER); + cap.supportScalarFunction(ScalarFunctionCapability.ADD_DAYS); + cap.supportScalarFunction(ScalarFunctionCapability.ADD_HOURS); + cap.supportScalarFunction(ScalarFunctionCapability.ADD_MINUTES); + cap.supportScalarFunction(ScalarFunctionCapability.ADD_MONTHS); + cap.supportScalarFunction(ScalarFunctionCapability.ADD_SECONDS); + cap.supportScalarFunction(ScalarFunctionCapability.ADD_WEEKS); + cap.supportScalarFunction(ScalarFunctionCapability.ADD_YEARS); + // CONVERT_TZ is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.CURRENT_DATE); + cap.supportScalarFunction(ScalarFunctionCapability.CURRENT_TIMESTAMP); + // DATE_TRUNC is not supported. Format options for TRUNCATE are different. + // DAY is not supported. EXTRACT does not work on strings. + // DAYS_BETWEEN is not supported. EXTRACT does not work on strings. + cap.supportScalarFunction(ScalarFunctionCapability.DBTIMEZONE); + // EXTRACT is not supported. SECOND must be cast to DOUBLE. + // HOURS_BETWEEN is not supported. EXTRACT does not work on strings. + cap.supportScalarFunction(ScalarFunctionCapability.LOCALTIMESTAMP); + // MINUTE is not supported. EXTRACT does not work on strings. + // MINUTES_BETWEEN is not supported. EXTRACT does not work on strings. + // MONTH is not supported. EXTRACT does not work on strings. + // MONTHS_BETWEEN is not supported. EXTRACT does not work on strings. + cap.supportScalarFunction(ScalarFunctionCapability.NUMTODSINTERVAL); + cap.supportScalarFunction(ScalarFunctionCapability.NUMTOYMINTERVAL); + // POSIX_TIME is not supported. Does not work on strings. + // SECOND is not supported. EXTRACT does not work on strings. + // SECONDS_BETWEEN is not supported. EXTRACT does not work on strings. + cap.supportScalarFunction(ScalarFunctionCapability.SESSIONTIMEZONE); + cap.supportScalarFunction(ScalarFunctionCapability.SYSDATE); + cap.supportScalarFunction(ScalarFunctionCapability.SYSTIMESTAMP); + // WEEK is not supported. + // YEAR is not supported. EXTRACT does not work on strings. + // YEARS_BETWEEN is not supported. EXTRACT does not work on strings. + // ST_X is not supported. + // ST_Y is not supported. + // ST_ENDPOINT is not supported. + // ST_ISCLOSED is not supported. + // ST_ISRING is not supported. + // ST_LENGTH is not supported. + // ST_NUMPOINTS is not supported. + // ST_POINTN is not supported. + // ST_STARTPOINT is not supported. + // ST_AREA is not supported. + // ST_EXTERIORRING is not supported. + // ST_INTERIORRINGN is not supported. + // ST_NUMINTERIORRINGS is not supported. + // ST_GEOMETRYN is not supported. + // ST_NUMGEOMETRIES is not supported. + // ST_BOUNDARY is not supported. + // ST_BUFFER is not supported. + // ST_CENTROID is not supported. + // ST_CONTAINS is not supported. + // ST_CONVEXHULL is not supported. + // ST_CROSSES is not supported. + // ST_DIFFERENCE is not supported. + // ST_DIMENSION is not supported. + // ST_DISJOINT is not supported. + // ST_DISTANCE is not supported. 
+ // ST_ENVELOPE is not supported. + // ST_EQUALS is not supported. + // ST_FORCE2D is not supported. + // ST_GEOMETRYTYPE is not supported. + // ST_INTERSECTION is not supported. + // ST_INTERSECTS is not supported. + // ST_ISEMPTY is not supported. + // ST_ISSIMPLE is not supported. + // ST_OVERLAPS is not supported. + // ST_SETSRID is not supported. + // ST_SYMDIFFERENCE is not supported. + // ST_TOUCHES is not supported. + // ST_TRANSFORM is not supported. + // ST_UNION is not supported. + // ST_WITHIN is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.CAST); + // IS_NUMBER is not supported. + // IS_BOOLEAN is not supported. + // IS_DATE is not supported. + // IS_DSINTERVAL is not supported. + // IS_YMINTERVAL is not supported. + // IS_TIMESTAMP is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.TO_CHAR); + cap.supportScalarFunction(ScalarFunctionCapability.TO_DATE); + cap.supportScalarFunction(ScalarFunctionCapability.TO_DSINTERVAL); + cap.supportScalarFunction(ScalarFunctionCapability.TO_YMINTERVAL); + cap.supportScalarFunction(ScalarFunctionCapability.TO_NUMBER); + cap.supportScalarFunction(ScalarFunctionCapability.TO_TIMESTAMP); + cap.supportScalarFunction(ScalarFunctionCapability.BIT_AND); + // BIT_CHECK is not supported. + // BIT_NOT is not supported. + // BIT_OR is not supported. + // BIT_SET is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.BIT_TO_NUM); + // BIT_XOR is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.CASE); + // CURRENT_SCHEMA is not supported. + // CURRENT_SESSION is not supported. + // CURRENT_STATEMENT is not supported. + // CURRENT_USER is not supported. + // HASH_MD5 is not supported. + // HASH_SHA is not supported. + // HASH_SHA1 is not supported. + // HASH_TIGER is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.NULLIFZERO); + // SYS_GUID is not supported. + cap.supportScalarFunction(ScalarFunctionCapability.ZEROIFNULL); + + return cap; + } + + @Override + public Map getAggregateFunctionAliases() { + Map aggregationAliases = new EnumMap<>(AggregateFunction.class); + // APPROXIMATE_COUNT_DISTINCT supported with version >= 12.1.0.2 + // aggregationAliases.put(AggregateFunction.APPROXIMATE_COUNT_DISTINCT, "APPROX_COUNT_DISTINCT"); + return aggregationAliases; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcCatalogs() { + return SchemaOrCatalogSupport.UNSUPPORTED; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcSchemas() { + return SchemaOrCatalogSupport.SUPPORTED; + } + + @Override + public MappedTable mapTable(ResultSet tables) throws SQLException { + String tableName = tables.getString("TABLE_NAME"); + if (tableName.startsWith("BIN$")) { + // In case of Oracle we may see deleted tables with strange names (BIN$OeQco6jg/drgUDAKzmRzgA==$0). Should be filtered out. Squirrel also doesn't see them for unknown reasons. 
See http://stackoverflow.com/questions/2446053/what-are-the-bin-tables-in-oracles-all-tab-columns-table + System.out.println("Skip table: " + tableName); + return MappedTable.createIgnoredTable(); + } else { + return super.mapTable(tables); + } + } + + @Override + public DataType mapJdbcType(ResultSet cols) throws SQLException { + DataType colType = null; + int jdbcType = cols.getInt("DATA_TYPE"); + switch (jdbcType) { + case Types.DECIMAL: + int decimalPrec = cols.getInt("COLUMN_SIZE"); + int decimalScale = cols.getInt("DECIMAL_DIGITS"); + if (decimalScale == -127) { + // Oracle JDBC driver returns scale -127 if NUMBER data type was specified without scale and precision. Convert to VARCHAR. + // See http://docs.oracle.com/cd/B28359_01/server.111/b28318/datatype.htm#i16209 + // and https://docs.oracle.com/cd/E19501-01/819-3659/gcmaz/ + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + break; + } + if (decimalPrec <= DataType.maxExasolDecimalPrecision) { + colType = DataType.createDecimal(decimalPrec, decimalScale); + } else { + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + } + break; + case Types.OTHER: + // Oracle JDBC uses OTHER as CLOB + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + break; + case -103: + // INTERVAL YEAR TO MONTH + case -104: + // INTERVAL DAY TO SECOND + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + break; + case -102: + case -101: + // -101 and -102 is TIMESTAMP WITH (LOCAL) TIMEZONE in Oracle. + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + break; + case 100: + case 101: + // 100 and 101 are BINARY_FLOAT and BINARY_DOUBLE in Oracle. + colType = DataType.createVarChar(DataType.maxExasolVarcharSize, DataType.ExaCharset.UTF8); + break; + } + return colType; + } + + @Override + public SqlGenerationVisitor getSqlGenerationVisitor(SqlGenerationContext context) { + return new OracleSqlGenerationVisitor(this, context); + } + + @Override + public IdentifierCaseHandling getUnquotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_AS_UPPER; + } + + @Override + public IdentifierCaseHandling getQuotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE; + } + + @Override + public String applyQuote(String identifier) { + // If identifier contains double quotation marks ", it needs to be escaped by another double quotation mark. E.g. "a""b" is the identifier a"b in the db. 
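+        // Example call (illustrative): applyQuote("a\"b") returns the six-character string "a""b", outer quotes included.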
+ return "\"" + identifier.replace("\"", "\"\"") + "\""; + } + + @Override + public String applyQuoteIfNeeded(String identifier) { + boolean isSimpleIdentifier = identifier.matches("^[A-Z][0-9A-Z_]*"); + if (isSimpleIdentifier) { + return identifier; + } else { + return applyQuote(identifier); + } + } + + @Override + public boolean requiresCatalogQualifiedTableNames(SqlGenerationContext context) { + return false; + } + + @Override + public boolean requiresSchemaQualifiedTableNames(SqlGenerationContext context) { + return true; + } + + @Override + public NullSorting getDefaultNullSorting() { + return NullSorting.NULLS_SORTED_HIGH; + } + + @Override + public String getStringLiteral(String value) { + return "'" + value.replace("'", "''") + "'"; + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/OracleSqlGenerationVisitor.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/OracleSqlGenerationVisitor.java new file mode 100644 index 000000000..c125f698f --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/dialects/impl/OracleSqlGenerationVisitor.java @@ -0,0 +1,536 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.dialects.SqlDialect; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.dialects.SqlGenerationVisitor; +import com.exasol.adapter.jdbc.ColumnAdapterNotes; +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.sql.*; +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + + +public class OracleSqlGenerationVisitor extends SqlGenerationVisitor { + + public OracleSqlGenerationVisitor(SqlDialect dialect, SqlGenerationContext context) { + super(dialect, context); + + if (dialect instanceof OracleSqlDialect && + ((OracleSqlDialect)dialect).getCastAggFuncToFloat()) { + aggregateFunctionsCast.add(AggregateFunction.SUM); + aggregateFunctionsCast.add(AggregateFunction.MIN); + aggregateFunctionsCast.add(AggregateFunction.MAX); + aggregateFunctionsCast.add(AggregateFunction.AVG); + aggregateFunctionsCast.add(AggregateFunction.MEDIAN); + aggregateFunctionsCast.add(AggregateFunction.FIRST_VALUE); + aggregateFunctionsCast.add(AggregateFunction.LAST_VALUE); + aggregateFunctionsCast.add(AggregateFunction.STDDEV); + aggregateFunctionsCast.add(AggregateFunction.STDDEV_POP); + aggregateFunctionsCast.add(AggregateFunction.STDDEV_SAMP); + aggregateFunctionsCast.add(AggregateFunction.VARIANCE); + aggregateFunctionsCast.add(AggregateFunction.VAR_POP); + aggregateFunctionsCast.add(AggregateFunction.VAR_SAMP); + } + + if (dialect instanceof OracleSqlDialect && + ((OracleSqlDialect)dialect).getCastScalarFuncToFloat()) { + scalarFunctionsCast.add(ScalarFunction.ADD); + scalarFunctionsCast.add(ScalarFunction.SUB); + scalarFunctionsCast.add(ScalarFunction.MULT); + scalarFunctionsCast.add(ScalarFunction.FLOAT_DIV); + scalarFunctionsCast.add(ScalarFunction.NEG); + scalarFunctionsCast.add(ScalarFunction.ABS); + scalarFunctionsCast.add(ScalarFunction.ACOS); + scalarFunctionsCast.add(ScalarFunction.ASIN); + scalarFunctionsCast.add(ScalarFunction.ATAN); + scalarFunctionsCast.add(ScalarFunction.ATAN2); + scalarFunctionsCast.add(ScalarFunction.COS); + scalarFunctionsCast.add(ScalarFunction.COSH); + 
scalarFunctionsCast.add(ScalarFunction.COT);
+            scalarFunctionsCast.add(ScalarFunction.DEGREES);
+            scalarFunctionsCast.add(ScalarFunction.EXP);
+            scalarFunctionsCast.add(ScalarFunction.GREATEST);
+            scalarFunctionsCast.add(ScalarFunction.LEAST);
+            scalarFunctionsCast.add(ScalarFunction.LN);
+            scalarFunctionsCast.add(ScalarFunction.LOG);
+            scalarFunctionsCast.add(ScalarFunction.MOD);
+            scalarFunctionsCast.add(ScalarFunction.POWER);
+            scalarFunctionsCast.add(ScalarFunction.RADIANS);
+            scalarFunctionsCast.add(ScalarFunction.SIN);
+            scalarFunctionsCast.add(ScalarFunction.SINH);
+            scalarFunctionsCast.add(ScalarFunction.SQRT);
+            scalarFunctionsCast.add(ScalarFunction.TAN);
+            scalarFunctionsCast.add(ScalarFunction.TANH);
+        }
+    }
+
+    // If set to true, the select list elements get aliases such as c1, c2, ...
+    // Can be refactored if we find a better way to implement it.
+    private boolean requiresSelectListAliasesForLimit = false;
+
+    private Set<AggregateFunction> aggregateFunctionsCast = new HashSet<>();
+    private Set<ScalarFunction> scalarFunctionsCast = new HashSet<>();
+
+    /**
+     * ORACLE Syntax (before 12c) for LIMIT 10:
+     * SELECT LIMIT_SUBSELECT.* FROM
+     *   (
+     *     <original query>
+     *   )
+     * LIMIT_SUBSELECT WHERE ROWNUM <= 10
+     *
+     * ORACLE Syntax (before 12c) for LIMIT 10 OFFSET 20:
+     * SELECT c1, c2, ... FROM
+     *   (
+     *     SELECT LIMIT_SUBSELECT.*, ROWNUM ROWNUM_SUB FROM
+     *       (
+     *         <original query>
+     *       )
+     *     LIMIT_SUBSELECT WHERE ROWNUM <= 30
+     *   ) WHERE ROWNUM_SUB > 20
+     *
+     * The rownum filter is evaluated before ORDER BY, which is why we need the subselects.
+     */
+    @Override
+    public String visit(SqlStatementSelect select) {
+        if (!select.hasLimit()) {
+            return super.visit(select);
+        } else {
+            SqlLimit limit = select.getLimit();
+            StringBuilder builder = new StringBuilder();
+
+            if (limit.hasOffset()) {
+                // We cannot simply select *, because that would include the rownum column. So we need aliases for the select list elements.
+                builder.append("SELECT ");
+                if (select.getSelectList().isRequestAnyColumn()) {
+                    // The system requested any column
+                    return "true";
+                } else if (select.getSelectList().isSelectStar()) {
+                    builder.append(Joiner.on(", ").join(buildAliases(select.getFromClause().getMetadata().getColumns().size())));
+                } else {
+                    builder.append(Joiner.on(", ").join(buildAliases(select.getSelectList().getSons().size())));
+                }
+                builder.append(" FROM ( ");
+                builder.append("SELECT LIMIT_SUBSELECT.*, ROWNUM ROWNUM_SUB FROM ( ");
+                this.requiresSelectListAliasesForLimit = true;
+                builder.append(super.visit(select));
+                builder.append(" ) LIMIT_SUBSELECT WHERE ROWNUM <= " + (limit.getLimit() + limit.getOffset()));
+                builder.append(" ) WHERE ROWNUM_SUB > " + limit.getOffset());
+            } else {
+                builder.append("SELECT LIMIT_SUBSELECT.* FROM ( ");
+                builder.append(super.visit(select));
+                builder.append(" ) LIMIT_SUBSELECT WHERE ROWNUM <= " + (limit.getLimit() + limit.getOffset()));
+            }
+            return builder.toString();
+        }
+    }
+
+    private List<String> buildAliases(int numSelectListElements) {
+        List<String> aliases = new ArrayList<>();
+        for (int i = 0; i < numSelectListElements; i++) {
+            aliases.add("c" + i);
+        }
+        return aliases;
+    }
+
+    @Override
+    public String visit(SqlSelectList selectList) {
+        List<String> selectListElements = new ArrayList<>();
+        if (selectList.isSelectStar()) {
+            if (requiresSelectListAliasesForLimit || selectListRequiresCasts(selectList)) {
+                // Do as if the user has all columns in the select list
+                SqlStatementSelect select = (SqlStatementSelect) selectList.getParent();
+                int columnId = 0;
+                for (ColumnMetadata columnMeta : select.getFromClause().getMetadata().getColumns()) {
+                    SqlColumn sqlColumn = new SqlColumn(columnId, columnMeta);
+                    selectListElements.add(sqlColumn.accept(this));
+                    ++columnId;
+                }
+            } else {
+                selectListElements.add("*");
+            }
+        } else {
+            for (SqlNode node : selectList.getSons()) {
+                selectListElements.add(node.accept(this));
+            }
+        }
+        if (requiresSelectListAliasesForLimit) {
+            // Add aliases to the select list elements
+            for (int i = 0; i < selectListElements.size(); i++) {
+                selectListElements.set(i, selectListElements.get(i) + " AS c" + i);
+            }
+        }
+        return Joiner.on(", ").join(selectListElements);
+    }
+
+    @Override
+    public String visit(SqlFunctionAggregateGroupConcat function) {
+        // GROUP_CONCAT maps to Oracle's LISTAGG ... WITHIN GROUP (ORDER BY ...)
+        String expression = function.getConcatExpression().accept(this);
+        StringBuilder builder = new StringBuilder();
+        builder.append("LISTAGG(");
+        builder.append(expression);
+        if (function.getSeparator() != null) {
+            builder.append(", ");
+            builder.append("'");
+            builder.append(function.getSeparator());
+            builder.append("'");
+        }
+        builder.append(") WITHIN GROUP(ORDER BY ");
+        if (function.getOrderByExpressions().size() > 0) {
+            for (int i = 0; i < function.getOrderByExpressions().size(); i++) {
+                if (i > 0) {
+                    builder.append(", ");
+                }
+                builder.append(function.getOrderByExpressions().get(i).accept(this));
+                if (!function.getAscendingOrderList().get(i)) {
+                    builder.append(" DESC");
+                }
+                if (function.getNullsFirstOrderList().get(i)) {
+                    builder.append(" NULLS FIRST");
+                }
+            }
+        } else {
+            builder.append(expression);
+        }
+        builder.append(")");
+        return builder.toString();
+    }
+
+    @Override
+    public String visit(SqlFunctionAggregate function) {
+        String sql = super.visit(function);
+        boolean isDirectlyInSelectList = (function.hasParent() && function.getParent().getType() == SqlNodeType.SELECT_LIST);
+        if (isDirectlyInSelectList && aggregateFunctionsCast.contains(function.getFunction())) {
+            // Cast to FLOAT because result set metadata has precision = 0, scale = 0
+            sql = "CAST(" + sql + " AS FLOAT)";
+        }
+        return sql;
+    }
+
+    @Override
+    public String visit(SqlFunctionScalar function) {
+        String sql = super.visit(function);
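+        // Post-process the functions whose Oracle rendering differs from the default SQL produced by super.visit(function).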
switch (function.getFunction()) { + case LOCATE: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("INSTR("); + builder.append(argumentsSql.get(1)); + builder.append(", "); + builder.append(argumentsSql.get(0)); + if (argumentsSql.size() > 2) { + builder.append(", "); + builder.append(argumentsSql.get(2)); + } + builder.append(")"); + sql = builder.toString(); + break; + } + case TRIM: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("TRIM("); + if (argumentsSql.size() > 1) { + builder.append(argumentsSql.get(1)); + builder.append(" FROM "); + builder.append(argumentsSql.get(0)); + } else { + builder.append(argumentsSql.get(0)); + } + builder.append(")"); + sql = builder.toString(); + break; + } + case ADD_DAYS: + case ADD_HOURS: + case ADD_MINUTES: + case ADD_SECONDS: + case ADD_WEEKS: + case ADD_YEARS: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("("); + builder.append(argumentsSql.get(0)); + builder.append(" + INTERVAL '"); + if (function.getFunction() == ScalarFunction.ADD_WEEKS) { + builder.append(7 * Integer.parseInt(argumentsSql.get(1))); + } else { + builder.append(argumentsSql.get(1)); + } + builder.append("' "); + switch (function.getFunction()) { + case ADD_DAYS: + case ADD_WEEKS: + builder.append("DAY"); + break; + case ADD_HOURS: + builder.append("HOUR"); + break; + case ADD_MINUTES: + builder.append("MINUTE"); + break; + case ADD_SECONDS: + builder.append("SECOND"); + break; + case ADD_YEARS: + builder.append("YEAR"); + break; + default: + break; + } + builder.append(")"); + sql = builder.toString(); + break; + } + case CURRENT_DATE: + sql = "CURRENT_DATE"; + break; + case CURRENT_TIMESTAMP: + sql = "CURRENT_TIMESTAMP"; + break; + case DBTIMEZONE: + sql = "DBTIMEZONE"; + break; + case LOCALTIMESTAMP: + sql = "LOCALTIMESTAMP"; + break; + case SESSIONTIMEZONE: + sql = "SESSIONTIMEZONE"; + break; + case SYSDATE: + sql = "TO_DATE(SYSDATE)"; + break; + case SYSTIMESTAMP: + sql = "SYSTIMESTAMP"; + break; + case BIT_AND: + sql = sql.replaceFirst("^BIT_AND", "BITAND"); + break; + case BIT_TO_NUM: + sql = sql.replaceFirst("^BIT_TO_NUM", "BIN_TO_NUM"); + break; + case NULLIFZERO: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("NULLIF("); + builder.append(argumentsSql.get(0)); + builder.append(", 0)"); + sql = builder.toString(); + break; + } + case ZEROIFNULL: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("NVL("); + builder.append(argumentsSql.get(0)); + builder.append(", 0)"); + sql = builder.toString(); + break; + } + case DIV: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("CAST(FLOOR("); + builder.append(argumentsSql.get(0)); + builder.append(" / "); + builder.append(argumentsSql.get(1)); + builder.append(") AS 
NUMBER(36, 0))"); + sql = builder.toString(); + break; + } + case COT: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("(1 / TAN("); + builder.append(argumentsSql.get(0)); + builder.append("))"); + sql = builder.toString(); + break; + } + case DEGREES: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("(("); + builder.append(argumentsSql.get(0)); + // ACOS(-1) = PI + builder.append(") * 180 / ACOS(-1))"); + sql = builder.toString(); + break; + } + case RADIANS: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("(("); + builder.append(argumentsSql.get(0)); + // ACOS(-1) = PI + builder.append(") * ACOS(-1) / 180)"); + sql = builder.toString(); + break; + } + case REPEAT: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("RPAD(TO_CHAR("); + builder.append(argumentsSql.get(0)); + builder.append("), LENGTH("); + builder.append(argumentsSql.get(0)); + builder.append(") * ROUND("); + builder.append(argumentsSql.get(1)); + builder.append("), "); + builder.append(argumentsSql.get(0)); + builder.append(")"); + sql = builder.toString(); + break; + } + case REVERSE: { + List argumentsSql = new ArrayList<>(); + for (SqlNode node : function.getSons()) { + argumentsSql.add(node.accept(this)); + } + StringBuilder builder = new StringBuilder(); + builder.append("REVERSE(TO_CHAR("); + builder.append(argumentsSql.get(0)); + builder.append("))"); + sql = builder.toString(); + break; + } + default: + break; + } + + boolean isDirectlyInSelectList = (function.hasParent() && function.getParent().getType() == SqlNodeType.SELECT_LIST); + if (isDirectlyInSelectList && scalarFunctionsCast.contains(function.getFunction())) { + // Cast to FLOAT because result set metadata has precision = 0, scale = 0 + sql = "CAST(" + sql + " AS FLOAT)"; + } + + return sql; + } + + private String getColumnProjectionString(SqlColumn column, String projString) { + boolean isDirectlyInSelectList = (column.hasParent() && column.getParent().getType() == SqlNodeType.SELECT_LIST); + if (!isDirectlyInSelectList) { + return projString; + } + String typeName = ColumnAdapterNotes.deserialize(column.getMetadata().getAdapterNotes(), column.getMetadata().getName()).getTypeName(); + if (typeName.startsWith("TIMESTAMP") || + typeName.startsWith("INTERVAL") || + typeName.equals("BINARY_FLOAT") || + typeName.equals("BINARY_DOUBLE") || + typeName.equals("CLOB") || + typeName.equals("NCLOB")) { + projString = "TO_CHAR(" + projString + ")"; + } else if (typeName.equals("NUMBER") && + column.getMetadata().getType().getExaDataType() == DataType.ExaDataType.VARCHAR) { + projString = "TO_CHAR(" + projString + ")"; + } else if (typeName.equals("ROWID") || + typeName.equals("UROWID")) { + projString = "ROWIDTOCHAR(" + projString + ")"; + } else if (typeName.equals("BLOB")) { + projString = "UTL_RAW.CAST_TO_VARCHAR2(" + projString + ")"; + } + return projString; + } + + private static final List typeNamesRequiringCast = 
ImmutableList.of("TIMESTAMP","INTERVAL","BINARY_FLOAT","BINARY_DOUBLE","CLOB","NCLOB","ROWID", "UROWID", "BLOB"); + + private boolean nodeRequiresCast(SqlNode node) { + if (node.getType() == SqlNodeType.COLUMN) { + SqlColumn column = (SqlColumn)node; + String typeName = ColumnAdapterNotes.deserialize(column.getMetadata().getAdapterNotes(), column.getMetadata().getName()).getTypeName(); + return typeNamesRequiringCast.contains(typeName); + } + return false; + } + + private boolean selectListRequiresCasts(SqlSelectList selectList) { + boolean requiresCasts = false; + for (SqlNode expression : selectList.getSons()) { + if (nodeRequiresCast(expression)) { + requiresCasts = true; + } + } + return requiresCasts; + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/ColumnAdapterNotes.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/ColumnAdapterNotes.java new file mode 100644 index 000000000..8c8680ac8 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/ColumnAdapterNotes.java @@ -0,0 +1,65 @@ +package com.exasol.adapter.jdbc; + +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.utils.JsonHelper; + +import javax.json.JsonBuilderFactory; +import javax.json.JsonObject; +import javax.json.JsonObjectBuilder; + +/** + * Serializes and deserializes the column adapter notes specific to the JDBC Adapter + */ +public class ColumnAdapterNotes { + + private int jdbcDataType; + private String typeName; + + public ColumnAdapterNotes(int jdbcDataType, String typeName) { + this.jdbcDataType = jdbcDataType; + this.typeName = typeName; + } + + public int getJdbcDataType() { + return jdbcDataType; + } + + public String getTypeName() { + return typeName; + } + + public static String serialize(ColumnAdapterNotes notes) { + JsonBuilderFactory factory = JsonHelper.getBuilderFactory(); + JsonObjectBuilder builder = factory.createObjectBuilder() + .add("jdbcDataType", notes.getJdbcDataType()) + .add("typeName", notes.getTypeName()); + return builder.build().toString(); + } + + public static ColumnAdapterNotes deserialize(String columnAdapterNotes, String columnName) { + if (columnAdapterNotes == null || columnAdapterNotes.isEmpty()) { + throw new RuntimeException(getException(columnName)); + } + JsonObject root; + try { + root = JsonHelper.getJsonObject(columnAdapterNotes); + } catch (Exception ex) { + throw new RuntimeException(getException(columnName)); + } + checkKey(root, "jdbcDataType", columnName); + checkKey(root, "typeName", columnName); + return new ColumnAdapterNotes( + root.getInt("jdbcDataType"), + root.getString("typeName")); + } + + private static void checkKey(JsonObject root, String key, String columnName) { + if (!root.containsKey(key)) { + throw new RuntimeException(getException(columnName)); + } + } + + private static String getException(String columnName) { + return "The adapternotes field of column " + columnName + " could not be parsed. 
Please refresh the virtual schema."; + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcAdapter.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcAdapter.java new file mode 100644 index 000000000..aed263f4b --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcAdapter.java @@ -0,0 +1,233 @@ +package com.exasol.adapter.jdbc; + +import com.exasol.ExaConnectionInformation; +import com.exasol.ExaMetadata; +import com.exasol.adapter.capabilities.*; +import com.exasol.adapter.dialects.*; +import com.exasol.adapter.dialects.impl.*; +import com.exasol.adapter.json.RequestJsonParser; +import com.exasol.adapter.json.ResponseJsonSerializer; +import com.exasol.adapter.metadata.SchemaMetadata; +import com.exasol.adapter.metadata.SchemaMetadataInfo; +import com.exasol.adapter.request.*; +import com.exasol.utils.JsonHelper; +import com.exasol.utils.UdfUtils; +import com.google.common.collect.ImmutableList; + +import java.sql.SQLException; +import java.util.List; +import java.util.Map; + +public class JdbcAdapter { + + final static SqlDialects supportedDialects; + static { + supportedDialects = new SqlDialects( + ImmutableList.of( + GenericSqlDialect.NAME, + ExasolSqlDialect.NAME, + ImpalaSqlDialect.NAME, + OracleSqlDialect.NAME)); + } + + /** + * This method gets called by the database during interactions with the + * virtual schema. + * + * @param meta + * Metadata object + * @param input + * json request, as defined in the Adapter Script API + * @return json response, as defined in the Adapter Script API + */ + public static String adapterCall(ExaMetadata meta, String input) throws Exception { + String result = ""; + try { + AdapterRequest request = new RequestJsonParser().parseRequest(input); + tryAttachToOutputService(request.getSchemaMetadataInfo()); + System.out.println("----------\nAdapter Request:\n----------\n" + input); + + switch (request.getType()) { + case CREATE_VIRTUAL_SCHEMA: + result = handleCreateVirtualSchema((CreateVirtualSchemaRequest)request, meta); + break; + case DROP_VIRTUAL_SCHEMA: + result = handleDropVirtualSchema((DropVirtualSchemaRequest)request); + break; + case REFRESH: + result = handleRefresh((RefreshRequest)request, meta); + break; + case SET_PROPERTIES: + result = handleSetProperty((SetPropertiesRequest)request, meta); + break; + case GET_CAPABILITIES: + result = handleGetCapabilities((GetCapabilitiesRequest)request); + break; + case PUSHDOWN: + result = handlePushdownRequest((PushdownRequest)request, meta); + break; + default: + throw new RuntimeException("Request Type not supported: " + request.getType()); + } + assert(result.isEmpty()); + System.out.println("----------\nResponse:\n----------\n" + JsonHelper.prettyJson(JsonHelper.getJsonObject(result))); + return result; + } catch (Exception ex) { + String stacktrace = UdfUtils.traceToString(ex); + throw new Exception("Error in Adapter: " + ex.getMessage() + "\nStacktrace: " + stacktrace + "\nFor following request: " + input + "\nResponse: " + result); + } + } + + private static String handleCreateVirtualSchema(CreateVirtualSchemaRequest request, ExaMetadata meta) throws SQLException { + JdbcAdapterProperties.checkPropertyConsistency(request.getSchemaMetadataInfo().getProperties(), supportedDialects); + SchemaMetadata remoteMeta = readMetadata(request.getSchemaMetadataInfo(), meta); + return ResponseJsonSerializer.makeCreateVirtualSchemaResponse(remoteMeta); + } + + private static 
+    private static SchemaMetadata readMetadata(SchemaMetadataInfo schemaMeta, ExaMetadata meta) throws SQLException {
+        List<String> tables = JdbcAdapterProperties.getTableFilter(schemaMeta.getProperties());
+        return readMetadata(schemaMeta, tables, meta);
+    }
+
+    private static SchemaMetadata readMetadata(SchemaMetadataInfo meta, List<String> tables, ExaMetadata exaMeta) throws SQLException {
+        // Connect via JDBC and read metadata
+        ExaConnectionInformation connection = JdbcAdapterProperties.getConnectionInformation(meta.getProperties(), exaMeta);
+        String catalog = JdbcAdapterProperties.getCatalog(meta.getProperties());
+        String schema = JdbcAdapterProperties.getSchema(meta.getProperties());
+        return JdbcMetadataReader.readRemoteMetadata(
+                connection.getAddress(),
+                connection.getUser(),
+                connection.getPassword(),
+                catalog,
+                schema,
+                tables,
+                supportedDialects,
+                JdbcAdapterProperties.getSqlDialectName(meta.getProperties(), supportedDialects));
+    }
+
+    private static String handleRefresh(RefreshRequest request, ExaMetadata meta) throws SQLException {
+        SchemaMetadata remoteMeta;
+        JdbcAdapterProperties.checkPropertyConsistency(request.getSchemaMetadataInfo().getProperties(), supportedDialects);
+        if (request.isRefreshForTables()) {
+            List<String> tables = request.getTables();
+            remoteMeta = readMetadata(request.getSchemaMetadataInfo(), tables, meta);
+        } else {
+            remoteMeta = readMetadata(request.getSchemaMetadataInfo(), meta);
+        }
+        return ResponseJsonSerializer.makeRefreshResponse(remoteMeta);
+    }
+
+    private static String handleSetProperty(SetPropertiesRequest request, ExaMetadata exaMeta) throws SQLException {
+        Map<String, String> changedProperties = request.getProperties();
+        Map<String, String> newSchemaMeta = JdbcAdapterProperties.getNewProperties(
+                request.getSchemaMetadataInfo().getProperties(), changedProperties);
+        JdbcAdapterProperties.checkPropertyConsistency(newSchemaMeta, supportedDialects);
+        if (JdbcAdapterProperties.isRefreshNeeded(changedProperties)) {
+            ExaConnectionInformation connection = JdbcAdapterProperties.getConnectionInformation(newSchemaMeta, exaMeta);
+            List<String> tableFilter = JdbcAdapterProperties.getTableFilter(newSchemaMeta);
+            SchemaMetadata remoteMeta = JdbcMetadataReader.readRemoteMetadata(
+                    connection.getAddress(),
+                    connection.getUser(),
+                    connection.getPassword(),
+                    JdbcAdapterProperties.getCatalog(newSchemaMeta),
+                    JdbcAdapterProperties.getSchema(newSchemaMeta),
+                    tableFilter,
+                    supportedDialects,
+                    JdbcAdapterProperties.getSqlDialectName(newSchemaMeta, supportedDialects));
+            return ResponseJsonSerializer.makeSetPropertiesResponse(remoteMeta);
+        }
+        return ResponseJsonSerializer.makeSetPropertiesResponse(null);
+    }
+
+    private static String handleDropVirtualSchema(DropVirtualSchemaRequest request) {
+        return ResponseJsonSerializer.makeDropVirtualSchemaResponse();
+    }
+
+    public static String handleGetCapabilities(GetCapabilitiesRequest request) {
+        SqlDialectContext dialectContext = new SqlDialectContext(SchemaAdapterNotes.deserialize(request.getSchemaMetadataInfo().getAdapterNotes(), request.getSchemaMetadataInfo().getSchemaName()));
+        SqlDialect dialect = JdbcAdapterProperties.getSqlDialect(request.getSchemaMetadataInfo().getProperties(), supportedDialects, dialectContext);
+        Capabilities capabilities = dialect.getCapabilities();
+        Capabilities excludedCapabilities = parseExcludedCapabilities(
+                JdbcAdapterProperties.getExcludedCapabilities(request.getSchemaMetadataInfo().getProperties()));
+        capabilities.subtractCapabilities(excludedCapabilities);
+        return
ResponseJsonSerializer.makeGetCapabilitiesResponse(capabilities); + } + + private static Capabilities parseExcludedCapabilities(String excludedCapabilitiesStr) { + System.out.println("Excluded Capabilities: " + excludedCapabilitiesStr); + Capabilities excludedCapabilities = new Capabilities(); + for (String cap : excludedCapabilitiesStr.split(",")) { + if (cap.trim().isEmpty()) { + continue; + } + if (cap.startsWith(ResponseJsonSerializer.LITERAL_PREFIX)) { + String literalCap = cap.replaceFirst(ResponseJsonSerializer.LITERAL_PREFIX, ""); + excludedCapabilities.supportLiteral(LiteralCapability.valueOf(literalCap)); + } else if (cap.startsWith(ResponseJsonSerializer.AGGREGATE_FUNCTION_PREFIX)) { + // Aggregate functions must be checked before scalar functions + String aggregateFunctionCap = cap.replaceFirst(ResponseJsonSerializer.AGGREGATE_FUNCTION_PREFIX, ""); + excludedCapabilities.supportAggregateFunction(AggregateFunctionCapability.valueOf(aggregateFunctionCap)); + } else if (cap.startsWith(ResponseJsonSerializer.SCALAR_FUNCTION_PREFIX)) { + String scalarFunctionCap = cap.replaceFirst(ResponseJsonSerializer.SCALAR_FUNCTION_PREFIX, ""); + excludedCapabilities.supportScalarFunction(ScalarFunctionCapability.valueOf(scalarFunctionCap)); + } else { + // High Level Capability + excludedCapabilities.supportMainCapability(MainCapability.valueOf(cap)); + } + } + return excludedCapabilities; + } + + private static String handlePushdownRequest(PushdownRequest request, ExaMetadata exaMeta) { + // Generate SQL pushdown query + SchemaMetadataInfo meta = request.getSchemaMetadataInfo(); + SqlDialectContext dialectContext = new SqlDialectContext(SchemaAdapterNotes.deserialize(request.getSchemaMetadataInfo().getAdapterNotes(), request.getSchemaMetadataInfo().getSchemaName())); + SqlDialect dialect = JdbcAdapterProperties.getSqlDialect(request.getSchemaMetadataInfo().getProperties(), supportedDialects, dialectContext); + SqlGenerationContext context = new SqlGenerationContext(JdbcAdapterProperties.getCatalog(meta.getProperties()), JdbcAdapterProperties.getSchema(meta.getProperties()), JdbcAdapterProperties.isLocal(meta.getProperties())); + SqlGenerationVisitor sqlGeneratorVisitor = dialect.getSqlGenerationVisitor(context); + String pushdownQuery = request.getSelect().accept(sqlGeneratorVisitor); + + boolean isLocal = JdbcAdapterProperties.isLocal(meta.getProperties()); + String credentialsAndConn = ""; + if (JdbcAdapterProperties.userSpecifiedConnection(meta.getProperties())) { + credentialsAndConn = "AT " + JdbcAdapterProperties.getConnectionName(meta.getProperties()); + } else { + ExaConnectionInformation connection = JdbcAdapterProperties.getConnectionInformation(meta.getProperties(), exaMeta); + if (JdbcAdapterProperties.isImportFromExa(meta.getProperties())) { + credentialsAndConn = "AT '" + JdbcAdapterProperties.getExaConnectionString(meta.getProperties()) + "'"; + } else { + credentialsAndConn = "AT '" + connection.getAddress() + "'"; + } + credentialsAndConn += " USER '" + connection.getUser() + "'"; + credentialsAndConn += " IDENTIFIED BY '" + connection.getPassword() + "'"; + } + String importSql; + boolean importFromExa = JdbcAdapterProperties.isImportFromExa(meta.getProperties()); + if (isLocal) { + importSql = pushdownQuery; + } else if (importFromExa) { + importSql = "IMPORT FROM EXA " + credentialsAndConn + + " STATEMENT '" + pushdownQuery.replace("'", "''") + "'"; + } else { + importSql = "IMPORT FROM JDBC " + credentialsAndConn + + " STATEMENT '" + pushdownQuery.replace("'", "''") + "'"; 
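+            // Illustration (not part of the original sources; connection values and
+            // names are placeholders): for a pushdown of the virtual-table query
+            //     SELECT * FROM VS.CLICKS WHERE USER_ID = 1
+            // the generated statement executed in EXASOL would look roughly like
+            //     IMPORT FROM JDBC AT 'jdbc:oracle:thin:@//host:1521/orcl'
+            //     USER 'app' IDENTIFIED BY 'secret'
+            //     STATEMENT 'SELECT * FROM "CLICKS" WHERE "USER_ID" = 1'
+            // Any single quotes inside the pushdown query would be doubled, see the
+            // replace() calls above.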
+        }
+
+        return ResponseJsonSerializer.makePushdownResponse(importSql);
+    }
+
+    // Forward stdout to an external output service
+    private static void tryAttachToOutputService(SchemaMetadataInfo meta) {
+        String debugAddress = JdbcAdapterProperties.getDebugAddress(meta.getProperties());
+        if (!debugAddress.isEmpty()) {
+            try {
+                String debugHost = debugAddress.split(":")[0];
+                int debugPort = Integer.parseInt(debugAddress.split(":")[1]);
+                UdfUtils.tryAttachToOutputService(debugHost, debugPort);
+            } catch (Exception ex) {
+                throw new RuntimeException("You have to specify a valid hostname and port for the UDF debug service, e.g. 'hostname:3000'");
+            }
+        }
+    }
+
+}
diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcAdapterProperties.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcAdapterProperties.java
new file mode 100644
index 000000000..6700018e5
--- /dev/null
+++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcAdapterProperties.java
@@ -0,0 +1,256 @@
+package com.exasol.adapter.jdbc;
+
+import com.exasol.ExaConnectionAccessException;
+import com.exasol.ExaConnectionInformation;
+import com.exasol.ExaMetadata;
+import com.exasol.adapter.dialects.SqlDialect;
+import com.exasol.adapter.dialects.SqlDialectContext;
+import com.exasol.adapter.dialects.SqlDialects;
+
+import java.util.*;
+
+/**
+ * Class exposing a convenient interface to the properties. Casts to the correct data types, checks for valid property values and consistency.
+ */
+public class JdbcAdapterProperties {
+
+    // One of the following needs to be set
+    static final String PROP_CATALOG_NAME = "CATALOG_NAME";
+    static final String PROP_SCHEMA_NAME = "SCHEMA_NAME";
+    static final String PROP_CONNECTION_NAME = "CONNECTION_NAME";
+    static final String PROP_CONNECTION_STRING = "CONNECTION_STRING";
+    static final String PROP_USERNAME = "USERNAME";
+    static final String PROP_PASSWORD = "PASSWORD";
+
+    // Optional Parameters
+    static final String PROP_TABLES = "TABLE_FILTER";
+    static final String PROP_DEBUG_ADDRESS = "DEBUG_ADDRESS";
+    static final String PROP_IS_LOCAL = "IS_LOCAL";
+    static final String PROP_SQL_DIALECT = "SQL_DIALECT";
+    static final String PROP_IMPORT_FROM_EXA = "IMPORT_FROM_EXA";
+    static final String PROP_EXA_CONNECTION_STRING = "EXA_CONNECTION_STRING";
+    static final String PROP_EXCLUDED_CAPABILITIES = "EXCLUDED_CAPABILITIES";
+
+    private static String getProperty(Map<String, String> properties, String name, String defaultValue) {
+        if (properties.containsKey(name)) {
+            return properties.get(name);
+        } else {
+            return defaultValue;
+        }
+    }
+
+    public static String getCatalog(Map<String, String> properties) {
+        return getProperty(properties, PROP_CATALOG_NAME, "");
+    }
+
+    public static String getSchema(Map<String, String> properties) {
+        return getProperty(properties, PROP_SCHEMA_NAME, "");
+    }
+
+    public static boolean userSpecifiedConnection(Map<String, String> properties) {
+        String connName = getProperty(properties, PROP_CONNECTION_NAME, "");
+        return (connName != null && !connName.isEmpty());
+    }
+
+    public static String getConnectionName(Map<String, String> properties) {
+        String connName = getProperty(properties, PROP_CONNECTION_NAME, "");
+        assert(connName != null && !connName.isEmpty());
+        return connName;
+    }
+
+    /**
+     * Returns the credentials for the remote system. These are either directly specified
+     * in the properties or obtained from a connection (requires the privilege to access
+     * the connection).
+     */
+    public static ExaConnectionInformation getConnectionInformation(Map<String, String> properties, ExaMetadata exaMeta) {
+        String connName = getProperty(properties, PROP_CONNECTION_NAME, "");
+        if (connName != null && !connName.isEmpty()) {
+            try {
+                ExaConnectionInformation connInfo = exaMeta.getConnection(connName);
+                return connInfo;
+            } catch (ExaConnectionAccessException e) {
+                throw new RuntimeException("Could not access the connection information of connection " + connName + ". Error: " + e.toString());
+            }
+        } else {
+            String connectionString = properties.get(PROP_CONNECTION_STRING);
+            String user = properties.get(PROP_USERNAME);
+            String password = properties.get(PROP_PASSWORD);
+            return new ExaConnectionInformationJdbc(connectionString, user, password);
+        }
+    }
+
+    public static void checkPropertyConsistency(Map<String, String> properties, SqlDialects supportedDialects) {
+        validatePropertyValues(properties);
+
+        checkMandatoryProperties(properties, supportedDialects);
+
+        if (isImportFromExa(properties)) {
+            if (getExaConnectionString(properties).isEmpty()) {
+                throw new RuntimeException("You defined the property " + PROP_IMPORT_FROM_EXA + ", please also define " + PROP_EXA_CONNECTION_STRING);
+            }
+        } else {
+            if (!getExaConnectionString(properties).isEmpty()) {
+                throw new RuntimeException("You defined the property " + PROP_EXA_CONNECTION_STRING + " without setting " + PROP_IMPORT_FROM_EXA + " to 'TRUE'. This is not allowed");
+            }
+        }
+    }
+
+    private static void validatePropertyValues(Map<String, String> properties) {
+        validateBooleanProperty(properties, PROP_IS_LOCAL);
+        validateBooleanProperty(properties, PROP_IMPORT_FROM_EXA);
+        if (properties.containsKey(PROP_DEBUG_ADDRESS)) {
+            validateDebugOutputAddress(properties.get(PROP_DEBUG_ADDRESS));
+        }
+    }
+
+    private static void validateBooleanProperty(Map<String, String> properties, String property) {
+        if (properties.containsKey(property)) {
+            if (!properties.get(property).toUpperCase().matches("^TRUE$|^FALSE$")) {
+                throw new RuntimeException("The value '" + properties.get(property) + "' for the property " + property + " is invalid. It has to be either 'true' or 'false' (case insensitive).");
+            }
+        }
+    }
+
+    private static void validateDebugOutputAddress(String debugAddress) {
+        if (!debugAddress.isEmpty()) {
+            String error = "You specified an invalid hostname and port for the UDF debug service (" + PROP_DEBUG_ADDRESS + "). Please provide a valid value, e.g. 'hostname:3000'";
+            try {
+                String debugHost = debugAddress.split(":")[0];
+                int debugPort = Integer.parseInt(debugAddress.split(":")[1]);
+            } catch (Exception ex) {
+                throw new RuntimeException(error);
+            }
+            if (debugAddress.split(":").length != 2) {
+                throw new RuntimeException(error);
+            }
+        }
+    }
+
+    private static void checkMandatoryProperties(Map<String, String> properties, SqlDialects supportedDialects) {
+        if (!properties.containsKey(PROP_SQL_DIALECT)) {
+            throw new RuntimeException("You have to specify the SQL dialect (" + PROP_SQL_DIALECT + "). Available dialects: " + supportedDialects.getDialectsString());
+        }
+        if (!supportedDialects.isSupported(properties.get(PROP_SQL_DIALECT))) {
+            throw new RuntimeException("SQL Dialect not supported: " + properties.get(PROP_SQL_DIALECT) + ". Available dialects: " + supportedDialects.getDialectsString());
+        }
+        if (properties.containsKey(PROP_CONNECTION_NAME)) {
+            if (properties.containsKey(PROP_CONNECTION_STRING) ||
+                    properties.containsKey(PROP_USERNAME) ||
+                    properties.containsKey(PROP_PASSWORD)) {
+                throw new RuntimeException("You specified a connection (" + PROP_CONNECTION_NAME + ") and therefore may not specify the properties " + PROP_CONNECTION_STRING + ", " + PROP_USERNAME + " and " + PROP_PASSWORD);
+            }
+        } else {
+            if (!properties.containsKey(PROP_CONNECTION_STRING)) {
+                throw new RuntimeException("You did not specify a connection (" + PROP_CONNECTION_NAME + ") and therefore have to specify the property " + PROP_CONNECTION_STRING);
+            }
+        }
+    }
+
+    public static boolean isImportFromExa(Map<String, String> properties) {
+        return getProperty(properties, PROP_IMPORT_FROM_EXA, "").toUpperCase().equals("TRUE");
+    }
+
+    public static List<String> getTableFilter(Map<String, String> properties) {
+        String tableNames = getProperty(properties, PROP_TABLES, "");
+        if (!tableNames.isEmpty()) {
+            List<String> tables = Arrays.asList(tableNames.split(","));
+            for (int i = 0; i < tables.size(); ++i) {
+                tables.set(i, tables.get(i).trim());
+            }
+            return tables;
+        } else {
+            return new ArrayList<>();
+        }
+    }
+
+    public static String getExcludedCapabilities(Map<String, String> properties) {
+        return getProperty(properties, PROP_EXCLUDED_CAPABILITIES, "");
+    }
+
+    public static String getDebugAddress(Map<String, String> properties) {
+        return getProperty(properties, PROP_DEBUG_ADDRESS, "");
+    }
+
+    public static boolean isLocal(Map<String, String> properties) {
+        return getProperty(properties, PROP_IS_LOCAL, "").toUpperCase().equals("TRUE");
+    }
+
+    public static String getSqlDialectName(Map<String, String> properties, SqlDialects supportedDialects) {
+        return getProperty(properties, PROP_SQL_DIALECT, "");
+    }
+
+    public static SqlDialect getSqlDialect(Map<String, String> properties, SqlDialects supportedDialects, SqlDialectContext dialectContext) {
+        String dialectName = getProperty(properties, PROP_SQL_DIALECT, "");
+        SqlDialect dialect = supportedDialects.getDialectByName(dialectName, dialectContext);
+        if (dialect == null) {
+            throw new RuntimeException("SQL Dialect not supported: " + dialectName + " - all dialects: " + supportedDialects.getDialectsString());
+        }
+        return dialect;
+    }
+
+    public static String getExaConnectionString(Map<String, String> properties) {
+        return getProperty(properties, PROP_EXA_CONNECTION_STRING, "");
+    }
+
+    public static boolean isRefreshNeeded(Map<String, String> newProperties) {
+        return newProperties.containsKey(PROP_CONNECTION_STRING)
+                || newProperties.containsKey(PROP_CONNECTION_NAME)
+                || newProperties.containsKey(PROP_USERNAME)
+                || newProperties.containsKey(PROP_PASSWORD)
+                || newProperties.containsKey(PROP_SCHEMA_NAME)
+                || newProperties.containsKey(PROP_CATALOG_NAME)
+                || newProperties.containsKey(PROP_TABLES);
+    }
+
+    public static class ExaConnectionInformationJdbc implements ExaConnectionInformation {
+
+        private String address;
+        private String user; // can be null
+        private String password; // can be null
+
+        public ExaConnectionInformationJdbc(String address, String user, String password) {
+            this.address = address;
+            this.user = user;
+            this.password = password;
+        }
+
+        @Override
+        public ConnectionType getType() {
+            return ConnectionType.PASSWORD;
+        }
+
+        @Override
+        public String getAddress() {
+            return this.address;
+        }
+
+        @Override
+        public String getUser() {
+            return this.user;
+        }
+
+        @Override
+        public String getPassword() {
+            return this.password;
+        }
+    }
+
+    /**
+     * Returns the properties as they would be after successfully applying the changes to the existing (old) set of properties.
+     */
+    public static Map<String, String> getNewProperties(
+            Map<String, String> oldProperties, Map<String, String> changedProperties) {
+        Map<String, String> newCompleteProperties = new HashMap<>(oldProperties);
+        for (Map.Entry<String, String> changedProperty : changedProperties.entrySet()) {
+            if (changedProperty.getValue() == null) {
+                // Null values represent properties which are deleted by the user (they might also never have existed)
+                newCompleteProperties.remove(changedProperty.getKey());
+            } else {
+                newCompleteProperties.put(changedProperty.getKey(), changedProperty.getValue());
+            }
+        }
+        return newCompleteProperties;
+    }
+
+}
diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcMetadataReader.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcMetadataReader.java
new file mode 100644
index 000000000..2920359c7
--- /dev/null
+++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/JdbcMetadataReader.java
@@ -0,0 +1,283 @@
+package com.exasol.adapter.jdbc;
+
+import com.exasol.adapter.dialects.SqlDialect;
+import com.exasol.adapter.dialects.SqlDialectContext;
+import com.exasol.adapter.dialects.SqlDialects;
+import com.exasol.adapter.metadata.ColumnMetadata;
+import com.exasol.adapter.metadata.SchemaMetadata;
+import com.exasol.adapter.metadata.TableMetadata;
+import com.google.common.base.Joiner;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * TODO Find good solutions to handle tables with unsupported data types, or tables that generate exceptions. Ideas: Skip such tables by adding a boolean property like IGNORE_INVALID_TABLES.
+ */
+public class JdbcMetadataReader {
+
+    public static SchemaMetadata readRemoteMetadata(String connectionString,
+                                                    String user,
+                                                    String password,
+                                                    String catalog,
+                                                    String schema,
+                                                    List<String> tableFilter,
+                                                    SqlDialects dialects,
+                                                    String dialectName) throws SQLException {
+        assert (catalog != null);
+        assert (schema != null);
+        try {
+            Connection conn = establishConnection(connectionString, user, password);
+            DatabaseMetaData dbMeta = conn.getMetaData();
+
+            // Retrieve relevant parts of DatabaseMetadata. Will be cached in the adapternotes of the schema.
+            SchemaAdapterNotes schemaAdapterNotes = new SchemaAdapterNotes(
+                    dbMeta.getCatalogSeparator(),
+                    dbMeta.getIdentifierQuoteString(),
+                    dbMeta.storesLowerCaseIdentifiers(),
+                    dbMeta.storesUpperCaseIdentifiers(),
+                    dbMeta.storesMixedCaseIdentifiers(),
+                    dbMeta.supportsMixedCaseIdentifiers(),
+                    dbMeta.storesLowerCaseQuotedIdentifiers(),
+                    dbMeta.storesUpperCaseQuotedIdentifiers(),
+                    dbMeta.storesMixedCaseQuotedIdentifiers(),
+                    dbMeta.supportsMixedCaseQuotedIdentifiers(),
+                    dbMeta.nullsAreSortedAtEnd(),
+                    dbMeta.nullsAreSortedAtStart(),
+                    dbMeta.nullsAreSortedHigh(),
+                    dbMeta.nullsAreSortedLow());
+
+            SqlDialect dialect = dialects.getDialectByName(dialectName, new SqlDialectContext(schemaAdapterNotes));
+
+            catalog = findCatalog(catalog, dbMeta, dialect);
+
+            schema = findSchema(schema, dbMeta, dialect);
+
+            List<TableMetadata> tables = findTables(catalog, schema, tableFilter, dbMeta, dialect);
+
+            conn.close();
+            return new SchemaMetadata(SchemaAdapterNotes.serialize(schemaAdapterNotes), tables);
+        } catch (SQLException e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    private static Connection establishConnection(String connectionString, String user, String password) throws SQLException {
+        System.out.println("conn: " + connectionString);
+
+        java.util.Properties info = new java.util.Properties();
+        if (user != null) {
+            info.put("user", user);
+        }
+        if (password != null) {
+            info.put("password", password);
+        }
+        if (KerberosUtils.isKerberosAuth(password)) {
+            try {
+                KerberosUtils.configKerberos(user, password);
+            } catch (Exception e) {
+                e.printStackTrace();
+                throw new RuntimeException("Error configuring Kerberos: " + e.getMessage(), e);
+            }
+        }
+        return DriverManager.getConnection(connectionString, info);
+    }
+
+    private static String findCatalog(String catalog, DatabaseMetaData dbMeta, SqlDialect dialect) throws SQLException {
+        boolean foundCatalog = false;
+        String curCatalog = "";
+        int numCatalogs = 0;
+        List<String> allCatalogs = new ArrayList<>();
+        try {
+            ResultSet res = dbMeta.getCatalogs();
+            while (res.next()) {
+                curCatalog = res.getString("TABLE_CAT"); // EXA_DB in case of EXASOL
+                allCatalogs.add(curCatalog);
+                if (curCatalog.equals(catalog)) {
+                    foundCatalog = true;
+                }
+                ++numCatalogs;
+            }
+        } catch (Exception ex) {
+            if (dialect.supportsJdbcCatalogs() == SqlDialect.SchemaOrCatalogSupport.SUPPORTED) {
+                throw new RuntimeException("Unexpected exception when accessing the catalogs: " + ex.getMessage(), ex);
+            } else if (dialect.supportsJdbcCatalogs() == SqlDialect.SchemaOrCatalogSupport.UNSUPPORTED) {
+                // Ignore this error
+                ex.printStackTrace();
+                return null;
+            } else {
+                // We don't know whether the system supports catalogs. If the user specified a catalog, we have a problem; otherwise we ignore the error
+                if (!catalog.isEmpty()) {
+                    throw new RuntimeException("Unexpected exception when accessing the catalogs: " + ex.getMessage(), ex);
+                } else {
+                    ex.printStackTrace();
+                    return null;
+                }
+            }
+        }
+        if (dialect.supportsJdbcCatalogs() == SqlDialect.SchemaOrCatalogSupport.SUPPORTED
+                || dialect.supportsJdbcCatalogs() == SqlDialect.SchemaOrCatalogSupport.UNKNOWN) {
+            if (foundCatalog) {
+                return catalog;
+            } else {
+                if (catalog.isEmpty()) {
+                    if (dialect.supportsJdbcCatalogs() == SqlDialect.SchemaOrCatalogSupport.SUPPORTED) {
+                        throw new RuntimeException("You have to specify a catalog. Available catalogs: " + Joiner.on(", ").join(allCatalogs));
+                    } else {
+                        if (numCatalogs == 0) {
+                            return null;
+                        } else {
+                            throw new RuntimeException("You have to specify a catalog. Available catalogs: " + Joiner.on(", ").join(allCatalogs));
+                        }
+                    }
+                } else {
+                    throw new RuntimeException("Catalog " + catalog + " does not exist. Available catalogs: " + Joiner.on(", ").join(allCatalogs));
+                }
+            }
+        } else {
+            assert(dialect.supportsJdbcCatalogs() == SqlDialect.SchemaOrCatalogSupport.UNSUPPORTED);
+            if (catalog.isEmpty()) {
+                if (numCatalogs == 0) {
+                    return null;
+                } else if (numCatalogs == 1) {
+                    // Take the one and only catalog (in case of EXASOL this is always EXA_DB). Returning null would probably also work fine.
+                    return curCatalog;
+                } else {
+                    throw new RuntimeException("Error: The data source is not expected to support catalogs, but has " + numCatalogs + " catalogs: " + Joiner.on(", ").join(allCatalogs));
+                }
+            } else {
+                throw new RuntimeException("You specified a catalog, however the data source does not support the concept of catalogs.");
+            }
+        }
+    }
+
+    private static String findSchema(String schema, DatabaseMetaData dbMeta, SqlDialect dialect) throws SQLException {
+        // Check if schema exists
+        boolean foundSchema = false;
+        List<String> allSchemas = new ArrayList<>();
+        int numSchemas = 0;
+        String curSchema = "";
+        try {
+            ResultSet schemas = dbMeta.getSchemas();
+            while (schemas.next()) {
+                curSchema = schemas.getString("TABLE_SCHEM");
+                allSchemas.add(curSchema);
+                if (curSchema.equals(schema)) {
+                    foundSchema = true;
+                }
+                ++numSchemas;
+            }
+        } catch (Exception ex) {
+            if (dialect.supportsJdbcSchemas() == SqlDialect.SchemaOrCatalogSupport.SUPPORTED) {
+                throw new RuntimeException("Unexpected exception when accessing the schemas: " + ex.getMessage(), ex);
+            } else if (dialect.supportsJdbcSchemas() == SqlDialect.SchemaOrCatalogSupport.UNSUPPORTED) {
+                // Ignore this error
+                ex.printStackTrace();
+                return null;
+            } else {
+                // We don't know whether the system supports schemas.
+                if (!schema.isEmpty()) {
+                    throw new RuntimeException("Unexpected exception when accessing the schemas: " + ex.getMessage(), ex);
+                } else {
+                    ex.printStackTrace();
+                    return null;
+                }
+            }
+        }
+        if (dialect.supportsJdbcSchemas() == SqlDialect.SchemaOrCatalogSupport.SUPPORTED
+                || dialect.supportsJdbcSchemas() == SqlDialect.SchemaOrCatalogSupport.UNKNOWN) {
+            if (foundSchema) {
+                return schema;
+            } else {
+                if (schema.isEmpty()) {
+                    if (dialect.supportsJdbcSchemas() == SqlDialect.SchemaOrCatalogSupport.SUPPORTED) {
+                        throw new RuntimeException("You have to specify a schema. Available schemas: " + Joiner.on(", ").join(allSchemas));
+                    } else {
+                        if (numSchemas == 0) {
+                            return null;
+                        } else {
+                            throw new RuntimeException("You have to specify a schema. Available schemas: " + Joiner.on(", ").join(allSchemas));
+                        }
+                    }
+                } else {
+                    throw new RuntimeException("Schema " + schema + " does not exist. Available schemas: " + Joiner.on(", ").join(allSchemas));
+                }
+            }
+        } else {
+            assert(dialect.supportsJdbcSchemas() == SqlDialect.SchemaOrCatalogSupport.UNSUPPORTED);
+            if (schema.isEmpty()) {
+                if (numSchemas == 0) {
+                    return null;
+                } else if (numSchemas == 1) {
+                    // Take the one and only schema. Returning null would probably also work fine.
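+                    // (Analogous to the single-catalog case in findCatalog above, where
+                    // EXASOL always reports exactly one catalog named EXA_DB.)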
+                    return curSchema;
+                } else {
+                    throw new RuntimeException("Error: The data source is not expected to support schemas, but has " + numSchemas + " schemas: " + Joiner.on(", ").join(allSchemas));
+                }
+            } else {
+                throw new RuntimeException("You specified a schema, however the data source does not support the concept of schemas.");
+            }
+        }
+    }
+
+    private static List<TableMetadata> findTables(String catalog, String schema, List<String> tableFilter, DatabaseMetaData dbMeta, SqlDialect dialect) throws SQLException {
+        List<TableMetadata> tables = new ArrayList<>();
+        ResultSet resTables = dbMeta.getTables(catalog, schema, null, null);
+        List<String> tableNames = new ArrayList<>();
+        List<String> tableComments = new ArrayList<>();
+        while (resTables.next()) {
+            SqlDialect.MappedTable mappedTable = dialect.mapTable(resTables);
+            if (!mappedTable.isIgnored()) {
+                tableNames.add(mappedTable.getTableName());
+                tableComments.add(mappedTable.getTableComment());
+            }
+        }
+
+        // Columns
+        for (int i = 0; i < tableNames.size(); ++i) {
+            String table = tableNames.get(i);
+            try {
+                List<ColumnMetadata> columns = readColumns(dbMeta, catalog, schema, table, dialect);
+                tables.add(new TableMetadata(table, "", columns, tableComments.get(i)));
+            } catch (Exception ex) {
+                throw new RuntimeException("Exception for table " + table, ex);
+            }
+        }
+        return tables;
+    }
+
+    private static boolean identifiersAreCaseInsensitive(SqlDialect dialect) {
+        return (dialect.getQuotedIdentifierHandling() == dialect.getUnquotedIdentifierHandling())
+                && dialect.getQuotedIdentifierHandling() != SqlDialect.IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE;
+    }
+
+    private static List<ColumnMetadata> readColumns(DatabaseMetaData dbMeta, String catalog, String schema, String table, SqlDialect dialect) throws SQLException {
+        ResultSet cols = dbMeta.getColumns(catalog, schema, table, null);
+        List<ColumnMetadata> columns = new ArrayList<>();
+        while (cols.next()) {
+            columns.add(dialect.mapColumn(cols));
+        }
+        if (columns.isEmpty()) {
+            System.out.println("Warning: Found a table without columns: " + table);
+        }
+        return columns;
+    }
+}
diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/KerberosUtils.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/KerberosUtils.java
new file mode 100644
index 000000000..48ac72dc2
--- /dev/null
+++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/KerberosUtils.java
@@ -0,0 +1,87 @@
+package com.exasol.adapter.jdbc;
+
+import javax.xml.bind.DatatypeConverter;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.nio.charset.Charset;
+
+/**
+ * Utility class to establish JDBC connections with Kerberos authentication
+ */
+public class KerberosUtils {
+
+    private static final String krbKey = "ExaAuthType=Kerberos;";
+
+    public static boolean isKerberosAuth(String pass) {
+        if (pass == null) {
+            return false;
+        }
+        return pass.indexOf(krbKey) == 0;
+    }
+
+    public static void configKerberos(String user, String pass) throws Exception {
+        try {
+            pass = pass.replaceFirst(krbKey, "");
+        } catch (Exception e) {
+            throw new RuntimeException("Could not find " + krbKey + " in password: " + e.getMessage());
+        }
+        String[] confKeytab = pass.split(";");
+        if (confKeytab.length != 2) {
+            throw new RuntimeException("Invalid Kerberos conf/keytab");
+        }
+        File kerberosBaseDir = new File("/tmp");
+        File krbDir = File.createTempFile("kerberos_", null, kerberosBaseDir);
+        krbDir.delete();
+        krbDir.mkdir();
+        krbDir.deleteOnExit();
+        String krbConfPath = writeKrbConf(krbDir, confKeytab[0]);
+        String keytabPath = writeKeytab(krbDir, confKeytab[1]);
+        String jaasConfigPath = writeJaasConfig(krbDir,
user, keytabPath); + System.setProperty("java.security.auth.login.config", jaasConfigPath); + System.setProperty("java.security.krb5.conf", krbConfPath); + System.setProperty("javax.security.auth.useSubjectCredsOnly", "false"); + } + + private static String writeKrbConf(File krbDir, String base64Conf) throws Exception { + File file = File.createTempFile("krb_", ".conf", krbDir); + file.deleteOnExit(); + FileOutputStream os = new FileOutputStream(file); + os.write(DatatypeConverter.parseBase64Binary(base64Conf)); + os.close(); + return file.getCanonicalPath(); + } + + private static String writeKeytab(File krbDir, String base64Keytab) throws Exception { + File file = File.createTempFile("kt_", ".keytab", krbDir); + file.deleteOnExit(); + FileOutputStream os = new FileOutputStream(file); + os.write(DatatypeConverter.parseBase64Binary(base64Keytab)); + os.close(); + return file.getCanonicalPath(); + } + + private static String writeJaasConfig(File krbDir, String princ, String keytabPath) throws Exception { + File file = File.createTempFile("jaas_", ".conf", krbDir); + file.deleteOnExit(); + String jaasData; + jaasData = "Client {\n"; + jaasData += "com.sun.security.auth.module.Krb5LoginModule required\n"; + jaasData += "principal=\"" + princ + "\"\n"; + jaasData += "useKeyTab=true\n"; + jaasData += "keyTab=\"" + keytabPath + "\"\n"; + jaasData += "doNotPrompt=true\n"; + jaasData += "useTicketCache=false;\n"; + jaasData += "};\n"; + jaasData += "com.sun.security.jgss.initiate {\n"; + jaasData += "com.sun.security.auth.module.Krb5LoginModule required\n"; + jaasData += "principal=\"" + princ + "\"\n"; + jaasData += "useKeyTab=true\n"; + jaasData += "keyTab=\"" + keytabPath + "\"\n"; + jaasData += "doNotPrompt=true\n"; + jaasData += "useTicketCache=false;\n"; + jaasData += "};\n"; + FileOutputStream os = new FileOutputStream(file); + os.write(jaasData.getBytes(Charset.forName("UTF-8"))); + os.close(); + return file.getCanonicalPath(); + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/SchemaAdapterNotes.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/SchemaAdapterNotes.java new file mode 100644 index 000000000..5aaa6bb25 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/adapter/jdbc/SchemaAdapterNotes.java @@ -0,0 +1,216 @@ +package com.exasol.adapter.jdbc; + +import com.exasol.adapter.metadata.SchemaMetadataInfo; +import com.exasol.utils.JsonHelper; + +import javax.json.JsonBuilderFactory; +import javax.json.JsonObject; +import javax.json.JsonObjectBuilder; + +/** + * Holds the schema adapter notes specific to the JDBC Adapter. Also includes functionality to serialize and deserialize. + */ +public class SchemaAdapterNotes { + + // String that this database uses as the separator between a catalog and table name + private String catalogSeparator; + + // string used to quote SQL identifiers + private String identifierQuoteString; + + // treats mixed case unquoted SQL identifiers as case insensitive and stores them in lower case. + private boolean storesLowerCaseIdentifiers; + + // treats mixed case unquoted SQL identifiers as case insensitive and stores them in upper case. TRUE for EXASOL & Oracle + private boolean storesUpperCaseIdentifiers; + + // treats mixed case unquoted SQL identifiers as case insensitive and stores them in mixed case. 
+ private boolean storesMixedCaseIdentifiers; + + // treats mixed case unquoted SQL identifiers as case sensitive and as a result stores them in mixed case. TRUE for EXASOL. + // Seems to be a bug that this is true for EXASOL. + private boolean supportsMixedCaseIdentifiers; + + // treats mixed case quoted SQL identifiers as case insensitive and stores them in lower case. + private boolean storesLowerCaseQuotedIdentifiers; + + // treats mixed case quoted SQL identifiers as case insensitive and stores them in upper case. + private boolean storesUpperCaseQuotedIdentifiers; + + // treats mixed case quoted SQL identifiers as case insensitive and stores them in mixed case. TRUE for Oracle. + // Oracle has this also true (only difference to EXASOL) which is in conflict with supportsMixedCaseQuotedIdentifiers + // which states that mixed case quoted identifiers are treated case sensitive. + private boolean storesMixedCaseQuotedIdentifiers; + + // treats mixed case quoted SQL identifiers as case sensitive and as a result stores them in mixed case. TRUE for EXASOL & Oracle. + private boolean supportsMixedCaseQuotedIdentifiers; + + // NULL values are sorted at the end regardless of sort order + private boolean nullsAreSortedAtEnd; + + // NULL values are sorted at the start regardless of sort order + private boolean nullsAreSortedAtStart; + + // NULL values are sorted high + private boolean nullsAreSortedHigh; + + // NULL values are sorted low + private boolean nullsAreSortedLow; + + public SchemaAdapterNotes(String catalogSeparator, + String identifierQuoteString, + boolean storesLowerCaseIdentifiers, + boolean storesUpperCaseIdentifiers, + boolean storesMixedCaseIdentifiers, + boolean supportsMixedCaseIdentifiers, + boolean storesLowerCaseQuotedIdentifiers, + boolean storesUpperCaseQuotedIdentifiers, + boolean storesMixedCaseQuotedIdentifiers, + boolean supportsMixedCaseQuotedIdentifiers, + boolean nullsAreSortedAtEnd, + boolean nullsAreSortedAtStart, + boolean nullsAreSortedHigh, + boolean nullsAreSortedLow) { + this.catalogSeparator = catalogSeparator; + this.identifierQuoteString = identifierQuoteString; + this.storesLowerCaseIdentifiers = storesLowerCaseIdentifiers; + this.storesUpperCaseIdentifiers = storesUpperCaseIdentifiers; + this.storesMixedCaseIdentifiers = storesMixedCaseIdentifiers; + this.supportsMixedCaseIdentifiers = supportsMixedCaseIdentifiers; + this.storesLowerCaseQuotedIdentifiers = storesLowerCaseQuotedIdentifiers; + this.storesUpperCaseQuotedIdentifiers = storesUpperCaseQuotedIdentifiers; + this.storesMixedCaseQuotedIdentifiers = storesMixedCaseQuotedIdentifiers; + this.supportsMixedCaseQuotedIdentifiers = supportsMixedCaseQuotedIdentifiers; + this.nullsAreSortedAtEnd = nullsAreSortedAtEnd; + this.nullsAreSortedAtStart = nullsAreSortedAtStart; + this.nullsAreSortedHigh = nullsAreSortedHigh; + this.nullsAreSortedLow = nullsAreSortedLow; + } + + public String getCatalogSeparator() { + return catalogSeparator; + } + + public String getIdentifierQuoteString() { + return identifierQuoteString; + } + + public boolean isSupportsMixedCaseIdentifiers() { + return supportsMixedCaseIdentifiers; + } + + public boolean isSupportsMixedCaseQuotedIdentifiers() { + return supportsMixedCaseQuotedIdentifiers; + } + + public boolean isStoresLowerCaseIdentifiers() { + return storesLowerCaseIdentifiers; + } + + public boolean isStoresUpperCaseIdentifiers() { + return storesUpperCaseIdentifiers; + } + + public boolean isStoresMixedCaseIdentifiers() { + return storesMixedCaseIdentifiers; + } + + 
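+    // For illustration only (not part of the original sources): serialize() below
+    // produces a flat JSON document of these flags, for example
+    //   {"catalogSeparator":".","identifierQuoteString":"\"",
+    //    "storesUpperCaseIdentifiers":true,"supportsMixedCaseQuotedIdentifiers":true, ...}
+    // deserialize() requires every key to be present and otherwise fails with a
+    // request to refresh the virtual schema.
+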
public boolean isStoresLowerCaseQuotedIdentifiers() { + return storesLowerCaseQuotedIdentifiers; + } + + public boolean isStoresUpperCaseQuotedIdentifiers() { + return storesUpperCaseQuotedIdentifiers; + } + + public boolean isStoresMixedCaseQuotedIdentifiers() { + return storesMixedCaseQuotedIdentifiers; + } + + public boolean isNullsAreSortedAtEnd() { + return nullsAreSortedAtEnd; + } + + public boolean isNullsAreSortedAtStart() { + return nullsAreSortedAtStart; + } + + public boolean isNullsAreSortedHigh() { + return nullsAreSortedHigh; + } + + public boolean isNullsAreSortedLow() { + return nullsAreSortedLow; + } + + public static String serialize(SchemaAdapterNotes notes) { + JsonBuilderFactory factory = JsonHelper.getBuilderFactory(); + JsonObjectBuilder builder = factory.createObjectBuilder() + .add("catalogSeparator", notes.getCatalogSeparator()) + .add("identifierQuoteString", notes.getIdentifierQuoteString()) + .add("storesLowerCaseIdentifiers", notes.isStoresLowerCaseIdentifiers()) + .add("storesUpperCaseIdentifiers", notes.isStoresUpperCaseIdentifiers()) + .add("storesMixedCaseIdentifiers", notes.isStoresMixedCaseIdentifiers()) + .add("supportsMixedCaseIdentifiers", notes.isSupportsMixedCaseIdentifiers()) + .add("storesLowerCaseQuotedIdentifiers", notes.isStoresLowerCaseQuotedIdentifiers()) + .add("storesUpperCaseQuotedIdentifiers", notes.isStoresUpperCaseQuotedIdentifiers()) + .add("storesMixedCaseQuotedIdentifiers", notes.isStoresMixedCaseQuotedIdentifiers()) + .add("supportsMixedCaseQuotedIdentifiers", notes.isSupportsMixedCaseQuotedIdentifiers()) + .add("nullsAreSortedAtEnd", notes.isNullsAreSortedAtEnd()) + .add("nullsAreSortedAtStart", notes.isNullsAreSortedAtStart()) + .add("nullsAreSortedHigh", notes.isNullsAreSortedHigh()) + .add("nullsAreSortedLow", notes.isNullsAreSortedLow()); + return builder.build().toString(); + } + + public static SchemaAdapterNotes deserialize(String adapterNotes, String schemaName) { + if (adapterNotes == null || adapterNotes.isEmpty()) { + throw new RuntimeException(getException(schemaName)); + } + JsonObject root; + try { + root = JsonHelper.getJsonObject(adapterNotes); + } catch (Exception ex) { + throw new RuntimeException(getException(schemaName)); + } + checkKey(root, "catalogSeparator", schemaName); + checkKey(root, "identifierQuoteString", schemaName); + checkKey(root, "storesLowerCaseIdentifiers", schemaName); + checkKey(root, "storesUpperCaseIdentifiers", schemaName); + checkKey(root, "storesMixedCaseIdentifiers", schemaName); + checkKey(root, "supportsMixedCaseIdentifiers", schemaName); + checkKey(root, "storesLowerCaseQuotedIdentifiers", schemaName); + checkKey(root, "storesUpperCaseQuotedIdentifiers", schemaName); + checkKey(root, "storesMixedCaseQuotedIdentifiers", schemaName); + checkKey(root, "supportsMixedCaseQuotedIdentifiers", schemaName); + checkKey(root, "nullsAreSortedAtEnd", schemaName); + checkKey(root, "nullsAreSortedAtStart", schemaName); + checkKey(root, "nullsAreSortedHigh", schemaName); + checkKey(root, "nullsAreSortedLow", schemaName); + return new SchemaAdapterNotes( + root.getString("catalogSeparator"), + root.getString("identifierQuoteString"), + root.getBoolean("storesLowerCaseIdentifiers"), + root.getBoolean("storesUpperCaseIdentifiers"), + root.getBoolean("storesMixedCaseIdentifiers"), + root.getBoolean("supportsMixedCaseIdentifiers"), + root.getBoolean("storesLowerCaseQuotedIdentifiers"), + root.getBoolean("storesUpperCaseQuotedIdentifiers"), + root.getBoolean("storesMixedCaseQuotedIdentifiers"), + 
root.getBoolean("supportsMixedCaseQuotedIdentifiers"), + root.getBoolean("nullsAreSortedAtEnd"), + root.getBoolean("nullsAreSortedAtStart"), + root.getBoolean("nullsAreSortedHigh"), + root.getBoolean("nullsAreSortedLow")); + } + + private static void checkKey(JsonObject root, String key, String schemaName) { + if (!root.containsKey(key)) { + throw new RuntimeException(getException(schemaName)); + } + } + + private static String getException(String schemaName) { + return "The adapternotes field of schema " + schemaName + " could not be parsed. Please refresh the virtual schema."; + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/utils/UdfUtils.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/utils/UdfUtils.java new file mode 100644 index 000000000..480f263cb --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/main/java/com/exasol/utils/UdfUtils.java @@ -0,0 +1,26 @@ +package com.exasol.utils; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.net.Socket; + +public class UdfUtils { + + public static void tryAttachToOutputService(String ip, int port) { + // Start before: udf_debug.py + try { + @SuppressWarnings("resource") + Socket socket = new Socket(ip, port); + PrintStream out = new PrintStream(socket.getOutputStream(), true); + System.setOut(out); + System.out.println("\n\n\nAttached to outputservice"); + } catch (Exception ex) {} // could not start output server} + } + + public static String traceToString(Exception ex) { + StringWriter errors = new StringWriter(); + ex.printStackTrace(new PrintWriter(errors)); + return errors.toString(); + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/AbstractIntegrationTest.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/AbstractIntegrationTest.java new file mode 100644 index 000000000..6668caa41 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/AbstractIntegrationTest.java @@ -0,0 +1,244 @@ +package com.exasol.adapter.dialects; + +import java.io.FileNotFoundException; +import java.sql.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.GregorianCalendar; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.junit.Assert.*; + +public class AbstractIntegrationTest { + + private static Connection connection; + + private static IntegrationTestConfig config; + + public static IntegrationTestConfig getConfig() throws FileNotFoundException { + if (config == null) { + config = new IntegrationTestConfig(); + } + return config; + } + + /** + * You have to call this method with a connection to your EXASOL database during the @BeforeClass method of your integration test + */ + public static void setConnection(Connection connection) { + AbstractIntegrationTest.connection = connection; + } + + public static Connection getConnection() { + return connection; + } + + private static void checkConnection() { + assertNotNull("Error: Your integration test did not set the connection.", connection); + } + + public static Connection connectToExa() throws ClassNotFoundException, SQLException, FileNotFoundException { + String user = config.getExasolUser(); + String password = config.getExasolPassword(); + return connectToExa(user, password); + } + + public static Connection connectToExa(String user, String password) throws 
ClassNotFoundException, SQLException, FileNotFoundException { + String exaAddress = config.getExasolAddress(); + Class.forName("com.exasol.jdbc.EXADriver"); + return DriverManager.getConnection("jdbc:exa:" + exaAddress, user, password); + } + + public ResultSet executeQuery(Connection conn, String query) throws SQLException { + return conn.createStatement().executeQuery(query); + } + + public ResultSet executeQuery(String query) throws SQLException { + checkConnection(); + return executeQuery(connection, query); + } + + public static void createJDBCAdapter(Connection conn, List jarIncludes) throws SQLException { + Statement stmt = conn.createStatement(); + stmt.execute("CREATE SCHEMA IF NOT EXISTS ADAPTER"); + String sql = "CREATE OR REPLACE JAVA ADAPTER SCRIPT ADAPTER.JDBC_ADAPTER AS\n"; + sql += "%scriptclass com.exasol.adapter.jdbc.JdbcAdapter;"; + for (String includePath : jarIncludes) { + sql += " %jar " + includePath + ";\n"; + } + //sql += " %jvmoption -Xms64m -Xmx64m;"; + sql += "/"; + stmt.execute(sql); + } + + public static void createJDBCAdapter(List jarIncludes) throws SQLException { + checkConnection(); + createJDBCAdapter(connection, jarIncludes); + } + + public static void createVirtualSchema(Connection conn, String virtualSchemaName, String dialect, String remoteCatalog, String remoteSchema, String connectionName, String user, String password, String adapter, String remoteConnectionString, boolean isLocal, String debugAddress, String tableFilter) throws SQLException { + removeVirtualSchema(conn, virtualSchemaName); + String sql = "CREATE VIRTUAL SCHEMA " + virtualSchemaName; + sql += " USING " + adapter; + sql += " WITH"; + if (!connectionName.isEmpty()) { + assertEquals("", remoteConnectionString); + assertEquals("", user); + assertEquals("", password); + sql += " CONNECTION_NAME='" + connectionName + "'"; + } + if (!remoteConnectionString.isEmpty()) { + sql += " CONNECTION_STRING='" + remoteConnectionString + "'"; + } + if (!user.isEmpty()) { + sql += " USERNAME='" + user + "'"; + } + if (!password.isEmpty()) { + sql += " PASSWORD='" + password + "'"; + } + if (!remoteCatalog.isEmpty()) { + sql += " CATALOG_NAME='" + remoteCatalog + "'"; + } + if (!remoteSchema.isEmpty()) { + sql += " SCHEMA_NAME='" + remoteSchema + "'"; + } + sql += " SQL_DIALECT='" + dialect + "'"; + sql += " IS_LOCAL='" + isLocal + "'"; + if (!debugAddress.isEmpty()) { + sql += " DEBUG_ADDRESS='" + debugAddress + "'"; + } + if (!tableFilter.isEmpty()) { + sql += " TABLE_FILTER='" + tableFilter + "'"; + } + conn.createStatement().execute(sql); + } + + public static void createVirtualSchema(String virtualSchemaName, String dialect, String remoteCatalog, String remoteSchema, String connectionName, String user, String password, String adapter, String remoteConnectionString, boolean isLocal, String debugAddress, String tableFilter) throws SQLException { + checkConnection(); + createVirtualSchema(connection, virtualSchemaName, dialect, remoteCatalog, remoteSchema, connectionName, user, password, adapter, remoteConnectionString, isLocal, debugAddress, tableFilter); + } + + public static void createConnection(Connection conn, String connectionName, String connectionString, String user, String password) throws SQLException { + removeConnection(conn, connectionName); + String sql = "CREATE CONNECTION " + connectionName; + sql += " TO '" + connectionString + "'"; + sql += " USER '" + user + "'"; + sql += " IDENTIFIED BY '" + password +"'"; + conn.createStatement().execute(sql); + } + + public static void 
createConnection(String connectionName, String connectionString, String user, String password) throws SQLException { + checkConnection(); + createConnection(connection, connectionName, connectionString, user, password); + } + + public static String getPortOfConnectedDatabase(Connection conn) throws SQLException { + ResultSet result = conn.createStatement().executeQuery("SELECT PARAM_VALUE FROM EXA_COMMANDLINE where PARAM_NAME = 'port'"); + result.next(); + return result.getString("PARAM_VALUE"); + } + + public static String getPortOfConnectedDatabase() throws SQLException { + checkConnection(); + return getPortOfConnectedDatabase(connection); + } + + public static void matchNextRow(ResultSet result, Object... expectedElements) throws SQLException { + result.next(); + assertEquals(getDiffWithTypes(Arrays.asList(expectedElements), rowToObject(result)), Arrays.asList(expectedElements), rowToObject(result)); + } + + public static void matchLastRow(ResultSet result, Object... expectedElements) throws SQLException { + matchNextRow(result, expectedElements); + assertFalse(result.next()); + } + + private static void removeConnection(Connection conn, String connectionName) throws SQLException { + Statement stmt = conn.createStatement(); + String sql = "DROP CONNECTION IF EXISTS " + connectionName; + stmt.execute(sql); + } + + private static void removeVirtualSchema(Connection conn, String schemaName) throws SQLException { + Statement stmt = conn.createStatement(); + String sql = "DROP VIRTUAL SCHEMA IF EXISTS " + schemaName + " CASCADE"; + stmt.execute(sql); + } + + /** + * This method shows the diff with the types. Normally, only the String representation is shown in the diff, so you cannot distinguish between (int)1 and (long)1. + */ + private static String getDiffWithTypes(List expected, List actual) { + StringBuilder builder = new StringBuilder(); + builder.append("expected elements <["); + boolean first = true; + for (Object element : expected) { + if (!first) { builder.append(", "); } + if (element == null) { + builder.append("null"); + } else { + builder.append("(" + element.getClass().getName() + ")" + element.toString()); + } + first = false; + } + builder.append("]> but was <["); + first = true; + for (Object element : actual) { + if (!first) { builder.append(", "); } + if (element == null) { + builder.append("null"); + } else { + builder.append("(" + element.getClass().getName() + ")" + element.toString()); + } + first = false; + } + builder.append("]>\n"); + return builder.toString(); + } + + public static void matchSingleRowExplain(Connection conn, String query, String expectedExplain) throws SQLException { + ResultSet result = conn.createStatement().executeQuery("EXPLAIN VIRTUAL " + query); + result.next(); + assertEquals(expectedExplain, extractStatementFromImport(result.getString("PUSHDOWN_SQL"))); + assertEquals(false, result.next()); + } + + public static void matchSingleRowExplain(String query, String expectedExplain) throws SQLException { + checkConnection(); + matchSingleRowExplain(connection, query, expectedExplain); + } + + private static List rowToObject(ResultSet resultSet) throws SQLException { + int colCount = resultSet.getMetaData().getColumnCount(); + List res = new ArrayList<>(colCount); + for (int i=1; i<=colCount; ++i) { + String type = (resultSet.getObject(i) == null) ? 
"null" : resultSet.getObject(i).getClass().getName(); + // System.out.println("- col " + i + " type: " + type); + res.add(resultSet.getObject(i)); + } + return res; + } + + private static String extractStatementFromImport(String importStatement) { + String regexp = " STATEMENT '(.*)'"; + Pattern pattern = Pattern.compile(regexp); + Matcher matcher = pattern.matcher(importStatement); + assertTrue(matcher.find()); + String statement = matcher.group(1); + // Replace double single quotes, e.g. in "IMPORT ... STATEMENT 'SELECT A=''x'' FROM T'"; + return statement.replace("''", "'"); + } + + public Date getSqlDate(int year, int month, int day) { + // Attention: month start with 0! + return new java.sql.Date(new GregorianCalendar(year, month-1, day).getTime().getTime()); + } + + public Timestamp getSqlTimestamp(int year, int month, int day, int hour, int minute, int second, int millisecond) { + // Attention: month start with 0! + return new java.sql.Timestamp(new GregorianCalendar(year, month-1, day, hour, minute, second).getTime().getTime() + millisecond); + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/CustomSqlGenerationVisitorTest.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/CustomSqlGenerationVisitorTest.java new file mode 100644 index 000000000..fe3895184 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/CustomSqlGenerationVisitorTest.java @@ -0,0 +1,84 @@ +package com.exasol.adapter.dialects; + +import static org.junit.Assert.assertEquals; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import com.exasol.adapter.jdbc.SchemaAdapterNotes; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import org.junit.Test; + +import com.exasol.adapter.dialects.SqlDialect; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.dialects.SqlGenerationVisitor; +import com.exasol.adapter.dialects.impl.ExasolSqlDialect; +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.metadata.TableMetadata; +import com.exasol.adapter.sql.SqlColumn; +import com.exasol.adapter.sql.SqlNode; +import com.exasol.adapter.sql.SqlPredicateNot; +import com.exasol.adapter.sql.SqlSelectList; +import com.exasol.adapter.sql.SqlStatementSelect; +import com.exasol.adapter.sql.SqlTable; +import com.exasol.utils.SqlTestUtil; +import org.mockito.Mockito; + +public class CustomSqlGenerationVisitorTest { + + /** + * This tests uses a SQL with nested expressions (NOT), to make sure that + * the custom sql generation visitor is used for all levels of recursion. 
+ */ + @Test + public void testSqlGenerator() { + SqlNode node = getTestSqlNode(); + String schemaName = "SCHEMA"; + String expectedSql = "SELECT NOT_CUSTOM (NOT_CUSTOM (C1)) FROM " + schemaName + + ".TEST"; + SqlGenerationContext context = new SqlGenerationContext("", schemaName, + false); + SqlDialectContext dialectContext = new SqlDialectContext(Mockito.mock(SchemaAdapterNotes.class)); + SqlGenerationVisitor generator = new TestSqlGenerationVisitor( + new ExasolSqlDialect(dialectContext), context); + String actualSql = node.accept(generator); + assertEquals(SqlTestUtil.normalizeSql(expectedSql), + SqlTestUtil.normalizeSql(actualSql)); + } + + private SqlNode getTestSqlNode() { + TableMetadata clicksMeta = getTestTableMetadata(); + SqlTable fromClause = new SqlTable("TEST", clicksMeta); + SqlSelectList selectList = new SqlSelectList( + ImmutableList.of(new SqlPredicateNot( + new SqlPredicateNot(new SqlColumn(1, clicksMeta + .getColumns().get(0)))))); + return new SqlStatementSelect(fromClause, selectList, null, null, null, + null, null); + } + + private TableMetadata getTestTableMetadata() { + List<ColumnMetadata> columns = new ArrayList<>(); + columns.add(new ColumnMetadata("C1", "", DataType.createBool(), true, + false, "", "")); + return new TableMetadata("TEST", "", columns, ""); + } + + public static class TestSqlGenerationVisitor extends SqlGenerationVisitor { + + public TestSqlGenerationVisitor(SqlDialect dialect, + SqlGenerationContext context) { + super(dialect, context); + } + + @Override + public String visit(SqlPredicateNot predicate) { + return "NOT_CUSTOM (" + predicate.getExpression().accept(this) + ")"; + } + + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/IntegrationTestConfig.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/IntegrationTestConfig.java new file mode 100644 index 000000000..bd3d1d31f --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/IntegrationTestConfig.java @@ -0,0 +1,177 @@ +package com.exasol.adapter.dialects; + + +import org.yaml.snakeyaml.Yaml; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; + +public class IntegrationTestConfig { + + Map<String, Object> config; + + public IntegrationTestConfig() throws FileNotFoundException { + this(getMandatorySystemProperty("integrationtest.configfile")); + } + + public IntegrationTestConfig(String configFile) throws FileNotFoundException { + try { + config = loadConfig(configFile); + } catch (FileNotFoundException ex) { + throw new FileNotFoundException("The specified integration test config file could not be found: " + configFile); + } catch (Exception ex) { + throw new RuntimeException("The specified integration test config file could not be parsed: " + configFile, ex); + } + } + + public String getJdbcAdapterPath() { + return getProperty("general", "jdbcAdapterPath"); + } + + public String getScpTargetPath() { + return getProperty("general", "scpTargetPath"); + } + + public boolean exasolTestsRequested() { + return getProperty("exasol", "runIntegrationTests", false); + } + + public String getExasolAddress() { + return getProperty("exasol", "address"); + } + + public String getExasolUser() { + return getProperty("exasol", "user"); + } + + public String getExasolPassword() { + return getProperty("exasol", "password"); + } + + public boolean isDebugOn() { + return 
getProperty("general", "debug", false); + } + + public String debugAddress() { + return getProperty("general", "debugAddress", ""); + } + + public boolean impalaTestsRequested() { + return getProperty("impala", "runIntegrationTests", false); + } + + public String getImpalaJdbcConnectionString() { + return getProperty("impala", "connectionString"); + } + + public String getImpalaJdbcPrefixPath() { + return getProperty("impala", "jdbcDriverPath"); + } + + public List getImpalaJdbcJars() { + return getProperty("impala", "jdbcDriverJars"); + } + + public boolean kerberosTestsRequested() { + return getProperty("kerberos", "runIntegrationTests", false); + } + + public String getKerberosJdbcConnectionString() { + return getProperty("kerberos", "connectionString"); + } + + public String getKerberosJdbcPrefixPath() { + return getProperty("kerberos", "jdbcDriverPath"); + } + + public String getKerberosUser() { + return getProperty("kerberos", "user"); + } + + public String getKerberosPassword() { + return getProperty("kerberos", "password"); + } + + public List getKerberosJdbcJars() { + return getProperty("kerberos", "jdbcDriverJars"); + } + + public boolean oracleTestsRequested() { + return getProperty("oracle", "runIntegrationTests", false); + } + + public String getOracleJdbcDriverPath() { + return getProperty("oracle", "jdbcDriverPath"); + } + + public String getOracleJdbcConnectionString() { + return getProperty("oracle", "connectionString"); + } + + public String getOracleUser() { + return getProperty("oracle", "user"); + } + + public String getOraclePassword() { + return getProperty("oracle", "password"); + } + + public boolean genericTestsRequested() { + return getProperty("generic", "runIntegrationTests", false); + } + + public String getGenericJdbcDriverPath() { + return getProperty("generic", "jdbcDriverPath"); + } + + public String getGenericJdbcConnectionString() { + return getProperty("generic", "connectionString"); + } + + public String getGenericUser() { + return getProperty("generic", "user"); + } + + public String getGenericPassword() { + return getProperty("generic", "password"); + } + + private Map loadConfig(String configFile) throws FileNotFoundException { + Yaml yaml = new Yaml(); + File file = new File(configFile); + InputStream inputStream = null; + inputStream = new FileInputStream(file); + return (Map) yaml.load(inputStream); + } + + private T getProperty(String section, String key, T defaultValue) { + try { + return getProperty(section, key); + } catch (Exception ex) { + return defaultValue; + } + } + + private T getProperty(String section, String key) { + if (!config.containsKey(section)) { + throw new RuntimeException("Integration test config file has no section '" + section + "'"); + } + Map sectionMap = (Map)config.get(section); + if (!sectionMap.containsKey(key)) { + throw new RuntimeException("Integration test config file has no key '" + key + "' in section '" + section + "'"); + } + return (T)sectionMap.get(key); + } + + private static String getMandatorySystemProperty(String propertyName) { + String value = System.getProperty(propertyName); + if (value == null) { + throw new RuntimeException("Integration tests requires system property '" + propertyName + "' to be set."); + } + return value; + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/IntegrationTestSetup.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/IntegrationTestSetup.java new file mode 100644 index 
000000000..f04904303 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/IntegrationTestSetup.java @@ -0,0 +1,43 @@ +package com.exasol.adapter.dialects; + +import com.google.common.collect.ImmutableList; + +import java.io.IOException; +import java.util.List; + +/** + * The main method of this class will be called in the pre-integration-test maven phase before the actual integration-test phase starts. + * Here we have to set up the integration test environment. + * + * Attention: This does not deploy the latest jar, because the dist maven module is cleaned and built after this module. Right now you need to do something like "mvn clean package && mvn verify -Pit -D..." to upload and test the latest jar. + */ +public class IntegrationTestSetup { + + public static void main(String[] args) throws IOException, InterruptedException { + + System.out.println("Start setup of the integration test environment"); + String projectVersion = args[0]; + String configFile = args[1]; + + IntegrationTestConfig config = new IntegrationTestConfig(configFile); + + // The local path looks like "virtualschema-jdbc-adapter-dist/target/virtualschema-jdbc-adapter-dist-0.0.1-SNAPSHOT.jar" + String artifactDistName = "virtualschema-jdbc-adapter-dist"; + String scpLocalPath = "../" + artifactDistName + "/target/" + artifactDistName + "-" + projectVersion + ".jar"; + String scpTargetPath = config.getScpTargetPath(); + ImmutableList<String> commands = ImmutableList.of("scp", scpLocalPath, scpTargetPath); + runBashCommand(commands); + } + + private static void runBashCommand(List<String> commands) throws IOException, InterruptedException { + System.out.println("EXECUTE command: " + commands); + ProcessBuilder pb = new ProcessBuilder(commands).inheritIO(); + Process process = pb.start(); + process.waitFor(); + System.out.println("Process ended with exit value " + process.exitValue()); + if (process.exitValue() != 0) { + throw new RuntimeException("SCP failed."); + } + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/SqlDialectTest.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/SqlDialectTest.java new file mode 100644 index 000000000..d661c4969 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/SqlDialectTest.java @@ -0,0 +1,238 @@ +package com.exasol.adapter.dialects; + +import com.exasol.adapter.capabilities.Capabilities; +import com.exasol.adapter.jdbc.SchemaAdapterNotes; +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.metadata.TableMetadata; +import com.exasol.adapter.sql.*; +import com.exasol.utils.SqlTestUtil; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.junit.Test; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; + +public class SqlDialectTest { + + @Test + public void testAggregateFunctionAliases() { + TableMetadata clicksMeta = getTestTableMetadata(); + SqlTable fromClause = new SqlTable("TEST", clicksMeta); + SqlColumn col1 = new SqlColumn(1, clicksMeta.getColumns().get(0)); + SqlSelectList selectList = new SqlSelectList(ImmutableList.of( + new SqlFunctionAggregate(AggregateFunction.APPROXIMATE_COUNT_DISTINCT, ImmutableList.of(col1), false), + new 
SqlFunctionAggregate(AggregateFunction.AVG, ImmutableList.of(col1), false), + new SqlFunctionAggregate(AggregateFunction.COUNT, new ArrayList<SqlNode>(), true), + new SqlFunctionAggregate(AggregateFunction.MAX, ImmutableList.of(col1), false) + )); + SqlNode node = new SqlStatementSelect(fromClause, selectList, null, null, null, null, null); + + String schemaName = "SCHEMA"; + String expectedSql = "SELECT NDV(C1), AVERAGE(C1), COUNT2(DISTINCT *), MAX(C1) FROM " + schemaName + ".TEST"; + + Map<AggregateFunction, String> aggAliases = new EnumMap<>(AggregateFunction.class); + Map<ScalarFunction, String> scalarAliases = ImmutableMap.of(); + Map<ScalarFunction, String> infixAliases = ImmutableMap.of(); + aggAliases.put(AggregateFunction.APPROXIMATE_COUNT_DISTINCT, "NDV"); + aggAliases.put(AggregateFunction.AVG, "AVERAGE"); + aggAliases.put(AggregateFunction.COUNT, "COUNT2"); + Map<ScalarFunction, String> prefixAliases = ImmutableMap.of(); + + SqlDialect dialect = new AliasesSqlDialect(aggAliases, scalarAliases, infixAliases, prefixAliases); + + SqlGenerationContext context = new SqlGenerationContext("", schemaName, false); + SqlGenerationVisitor generator = new SqlGenerationVisitor(dialect, context); + String actualSql = node.accept(generator); + assertEquals(SqlTestUtil.normalizeSql(expectedSql), SqlTestUtil.normalizeSql(actualSql)); + } + + @Test + public void testScalarFunctionAliases() { + TableMetadata clicksMeta = getTestTableMetadata(); + SqlTable fromClause = new SqlTable("TEST", clicksMeta); + SqlColumn col1 = new SqlColumn(1, clicksMeta.getColumns().get(0)); + SqlSelectList selectList = new SqlSelectList(ImmutableList.of( + new SqlFunctionScalar(ScalarFunction.ABS, ImmutableList.of(col1), false, false), + new SqlFunctionScalar(ScalarFunction.ADD, ImmutableList.of(col1, new SqlLiteralExactnumeric(new BigDecimal(100))), true, false), + new SqlFunctionScalar(ScalarFunction.SUB, ImmutableList.of(col1, new SqlLiteralExactnumeric(new BigDecimal(100))), true, false), + new SqlFunctionScalar(ScalarFunction.TO_CHAR, ImmutableList.of(col1), true, false), + new SqlFunctionScalar(ScalarFunction.NEG, ImmutableList.of(col1), false, false) + )); + SqlNode node = new SqlStatementSelect(fromClause, selectList, null, null, null, null, null); + + String schemaName = "SCHEMA"; + // ADD is infix by default, but must be non-infix after applying the alias.
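 + // SUB and TO_CHAR keep their default rendering below ((C1 - 100) and TO_CHAR(C1)) because no alias is registered for them.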
+ String expectedSql = "SELECT ABSOLUTE(C1), PLUS(C1, 100), (C1 - 100), TO_CHAR(C1), NEGATIVE(C1) FROM " + schemaName + ".TEST"; + + Map scalarAliases = new EnumMap<>(ScalarFunction.class); + scalarAliases.put(ScalarFunction.ABS, "ABSOLUTE"); + scalarAliases.put(ScalarFunction.ADD, "PLUS"); + scalarAliases.put(ScalarFunction.NEG, "NEGATIVE"); + SqlDialect dialect = new AliasesSqlDialect(ImmutableMap.of(), scalarAliases, ImmutableMap.of(), ImmutableMap.of()); + + SqlGenerationContext context = new SqlGenerationContext("", schemaName, false); + SqlGenerationVisitor generator = new SqlGenerationVisitor(dialect, context); + String actualSql = node.accept(generator); + assertEquals(SqlTestUtil.normalizeSql(expectedSql), SqlTestUtil.normalizeSql(actualSql)); + } + + @Test + public void testInvalidAliases() throws Exception { + TableMetadata clicksMeta = getTestTableMetadata(); + SqlTable fromClause = new SqlTable("TEST", clicksMeta); + SqlSelectList selectList = new SqlSelectList(); + SqlNode node = new SqlStatementSelect(fromClause, selectList, null, null, null, null, null); + + SqlGenerationContext context = new SqlGenerationContext("", "schema", false); + + // Test non-simple scalar functions + for (ScalarFunction function : ScalarFunction.values()) { + if (!function.isSimple()) { + Map scalarAliases = ImmutableMap.of(function, "ALIAS"); + SqlDialect dialect = new AliasesSqlDialect(ImmutableMap.of(), scalarAliases, ImmutableMap.of(), ImmutableMap.of()); + try { + SqlGenerationVisitor generator = new SqlGenerationVisitor(dialect, context); + throw new Exception("Should never arrive here"); + } catch(RuntimeException ex) { + // This error is expected + } + } + } + + // Test non-simple aggregate functions + for (AggregateFunction function : AggregateFunction.values()) { + if (!function.isSimple()) { + Map aggregateAliases = ImmutableMap.of(function, "ALIAS"); + SqlDialect dialect = new AliasesSqlDialect(aggregateAliases, ImmutableMap.of(), ImmutableMap.of(), ImmutableMap.of()); + try { + SqlGenerationVisitor generator = new SqlGenerationVisitor(dialect, context); + throw new Exception("Should never arrive here"); + } catch(RuntimeException ex) { + // This error is expected + } + } + } + } + + private TableMetadata getTestTableMetadata() { + List columns = new ArrayList<>(); + columns.add(new ColumnMetadata("C1", "", DataType.createBool(), true, + false, "", "")); + return new TableMetadata("TEST", "", columns, ""); + } + + static class AliasesSqlDialect extends AbstractSqlDialect { + + private Map aggregationAliases; + private Map scalarAliases; + private Map infixAliases; + private Map prefixAliases; + + public AliasesSqlDialect(Map aggregationAliases, Map scalarAliases + , Map infixAliases, Map prefixAliases) { + super(new SqlDialectContext(new SchemaAdapterNotes(".", "\"", false, false, false, false, false, false, false, false, false, false, true, false))); + this.aggregationAliases = aggregationAliases; + this.scalarAliases = scalarAliases; + this.infixAliases = infixAliases; + this.prefixAliases = prefixAliases; + } + + @Override + public Capabilities getCapabilities() { + Capabilities caps = new Capabilities(); + caps.supportAllCapabilities(); + return caps; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcCatalogs() { + return SchemaOrCatalogSupport.UNSUPPORTED; + } + + @Override + public SchemaOrCatalogSupport supportsJdbcSchemas() { + return SchemaOrCatalogSupport.UNSUPPORTED; + } + + @Override + public Map getAggregateFunctionAliases() { + return aggregationAliases; + } + 
+ @Override + public Map getScalarFunctionAliases() { + return scalarAliases; + } + + @Override + public Map getBinaryInfixFunctionAliases() { + if (infixAliases.isEmpty()) { + return super.getBinaryInfixFunctionAliases(); + } else { + return infixAliases; + } + } + + @Override + public Map getPrefixFunctionAliases() { + if (prefixAliases.isEmpty()) { + return super.getPrefixFunctionAliases(); + } else { + return prefixAliases; + } + } + + @Override + public String getPublicName() { + return "TEST"; + } + + @Override + public IdentifierCaseHandling getUnquotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_AS_UPPER; + } + + @Override + public IdentifierCaseHandling getQuotedIdentifierHandling() { + return IdentifierCaseHandling.INTERPRET_CASE_SENSITIVE; + } + + @Override + public String applyQuote(String identifier) { + return "\"" + identifier + "\""; + } + + @Override + public String applyQuoteIfNeeded(String identifier) { + return identifier; // Intentionally kept simple + } + + @Override + public boolean requiresCatalogQualifiedTableNames(SqlGenerationContext context) { + return false; + } + + @Override + public boolean requiresSchemaQualifiedTableNames(SqlGenerationContext context) { + return true; + } + + @Override + public NullSorting getDefaultNullSorting() { + return NullSorting.NULLS_SORTED_HIGH; + } + + @Override + public String getStringLiteral(String value) { + return "'" + value + "'"; + } + } + +} \ No newline at end of file diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/SqlDialectsTest.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/SqlDialectsTest.java new file mode 100644 index 000000000..08b2edcd0 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/SqlDialectsTest.java @@ -0,0 +1,28 @@ +package com.exasol.adapter.dialects; + +import com.exasol.adapter.dialects.impl.ExasolSqlDialect; +import com.exasol.adapter.dialects.impl.ImpalaSqlDialect; +import com.exasol.adapter.jdbc.SchemaAdapterNotes; +import com.google.common.collect.ImmutableList; +import org.junit.Test; + +import static org.junit.Assert.assertTrue; + +public class SqlDialectsTest { + + @Test + public void testGetDialectByName() { + SqlDialects dialects = new SqlDialects(ImmutableList.of(ExasolSqlDialect.NAME, ImpalaSqlDialect.NAME)); + SqlDialectContext context = new SqlDialectContext(new SchemaAdapterNotes(".", "\"", false, false, false, false, false, false, false, false, false, false, true, false)); + assertTrue(dialects.getDialectByName("IMPALA", context).getClass().equals(ImpalaSqlDialect.class)); + + assertTrue(dialects.getDialectByName("iMpAlA", context).getClass().equals(ImpalaSqlDialect.class)); + + assertTrue(dialects.getDialectByName("impala", context).getClass().equals(ImpalaSqlDialect.class)); + + assertTrue(dialects.getDialectByName("EXASOL", context).getClass().equals(ExasolSqlDialect.class)); + + assertTrue(dialects.getDialectByName("unknown-dialect", context) == null); + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/DialectTestData.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/DialectTestData.java new file mode 100644 index 000000000..805ad8a71 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/DialectTestData.java @@ -0,0 +1,53 @@ +package com.exasol.adapter.dialects.impl; + 
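+// Shared test fixtures used by several dialect tests: a representative SELECT statement (spelled out in the SQL comment inside getTestSqlNode below) and the metadata of the CLICKS table it references.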
+import com.exasol.adapter.dialects.SqlDialectContext; +import com.exasol.adapter.jdbc.ColumnAdapterNotes; +import com.exasol.adapter.jdbc.SchemaAdapterNotes; +import com.exasol.adapter.metadata.ColumnMetadata; +import com.exasol.adapter.metadata.DataType; +import com.exasol.adapter.metadata.TableMetadata; +import com.exasol.adapter.sql.*; +import com.google.common.collect.ImmutableList; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.List; + +public class DialectTestData { + + public static SqlNode getTestSqlNode() { + // SELECT USER_ID, count(URL) FROM CLICKS + // WHERE 1 < USER_ID + // GROUP BY USER_ID + // HAVING 1 < COUNT(URL) + // ORDER BY USER_ID + // LIMIT 10; + TableMetadata clicksMeta = getClicksTableMetadata(); + SqlTable fromClause = new SqlTable("CLICKS", clicksMeta); + SqlSelectList selectList = new SqlSelectList(ImmutableList.of( + new SqlColumn(0, clicksMeta.getColumns().get(0)), + new SqlFunctionAggregate(AggregateFunction.COUNT, ImmutableList.of(new SqlColumn(1, clicksMeta.getColumns().get(1))), false))); + SqlNode whereClause = new SqlPredicateLess(new SqlLiteralExactnumeric(BigDecimal.ONE), new SqlColumn(0, clicksMeta.getColumns().get(0))); + SqlExpressionList groupBy = new SqlGroupBy(ImmutableList.of(new SqlColumn(0, clicksMeta.getColumns().get(0)))); + SqlNode countUrl = new SqlFunctionAggregate(AggregateFunction.COUNT, ImmutableList.of(new SqlColumn(1, clicksMeta.getColumns().get(1))), false); + SqlNode having = new SqlPredicateLess(new SqlLiteralExactnumeric(BigDecimal.ONE), countUrl); + SqlOrderBy orderBy = new SqlOrderBy(ImmutableList.of(new SqlColumn(0, clicksMeta.getColumns().get(0))), ImmutableList.of(true), ImmutableList.of(true)); + SqlLimit limit = new SqlLimit(10); + return new SqlStatementSelect(fromClause, selectList, whereClause, groupBy, having, orderBy, limit); + } + + public static TableMetadata getClicksTableMetadata() { + List columns = new ArrayList<>(); + columns.add(new ColumnMetadata("USER_ID", ColumnAdapterNotes.serialize(new ColumnAdapterNotes(3, "DECIMAL")), DataType.createDecimal(18, 0), true, false, "", "")); + columns.add(new ColumnMetadata("URL", ColumnAdapterNotes.serialize(new ColumnAdapterNotes(12, "VARCHAR")), DataType.createVarChar(10000, DataType.ExaCharset.UTF8), true, false, "", "")); + return new TableMetadata("CLICKS", "", columns, ""); + } + + public static SqlDialectContext getExasolDialectContext() { + return new SqlDialectContext(new SchemaAdapterNotes(".", "\"", false, true, false, true, false, false, false, true, false, false, true, false)); + } + + public static SqlDialectContext getOracleDialectContext() { + return new SqlDialectContext(new SchemaAdapterNotes(".", "\"", false, false, false, false, false, false, false, false, false, false, true, false)); + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ExasolSqlDialectIT.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ExasolSqlDialectIT.java new file mode 100644 index 000000000..856da90e2 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ExasolSqlDialectIT.java @@ -0,0 +1,229 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.dialects.AbstractIntegrationTest; +import com.exasol.adapter.dialects.SqlDialects; +import com.exasol.adapter.jdbc.JdbcMetadataReader; +import com.exasol.adapter.json.SchemaMetadataSerializer; +import 
com.exasol.adapter.metadata.SchemaMetadata; +import com.google.common.collect.ImmutableList; +import org.junit.*; +import org.junit.rules.ExpectedException; + +import java.io.FileNotFoundException; +import java.math.BigDecimal; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertNotNull; + +/** + * Integration tests for the EXASOL SQL dialect. + */ +public class ExasolSqlDialectIT extends AbstractIntegrationTest { + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + private static final String testSchema = "NATIVE_EXA_IT"; + private static final String testSchemaMixedCase = "NATIVE_EXA_IT_Mixed_Case"; + private static final String virtualSchema = "VS_EXA_IT"; + private static final String virtualSchemaMixedCase = "VS_EXA_IT_MIXED_CASE"; + + @BeforeClass + public static void setUpClass() throws FileNotFoundException, SQLException, ClassNotFoundException { + Assume.assumeTrue(getConfig().exasolTestsRequested()); + setConnection(connectToExa()); + String connectionString = "jdbc:exa:localhost:" + getPortOfConnectedDatabase(); // connect via Virtual Schema to local database + // The EXASOL jdbc driver is included in the Maven dependencies, so no need to add it here. + List<String> includes = ImmutableList.of(getConfig().getJdbcAdapterPath()); + createJDBCAdapter(includes); + createTestSchema(); + createVirtualSchema( + virtualSchema, + ExasolSqlDialect.NAME, + "", testSchema, + "", + getConfig().getExasolUser(), + getConfig().getExasolPassword(), + "ADAPTER.JDBC_ADAPTER", + connectionString, true, + getConfig().debugAddress(), + ""); + createVirtualSchema( + virtualSchemaMixedCase, + ExasolSqlDialect.NAME, + "", testSchemaMixedCase, + "", + getConfig().getExasolUser(), + getConfig().getExasolPassword(), + "ADAPTER.JDBC_ADAPTER", + connectionString, true, + getConfig().debugAddress(), + ""); + } + + private static void createTestSchema() throws SQLException { + // EXASOL integration test is special, because we can directly create our test data.
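 + // (ALL_EXA_TYPES below backs the data type tests, WITH_NULLS the NULL handling test, and the mixed-case schema the identifier case sensitivity tests.)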
+ // For other dialects you have to prepare the source data base separately, because + // otherwise we would need to make the jdbc driver visible to the integration test framework as well (adds complexity) + Statement stmt = getConnection().createStatement(); + stmt.execute("DROP SCHEMA IF EXISTS " + testSchema + " CASCADE"); + stmt.execute("CREATE SCHEMA " + testSchema); + stmt.execute("CREATE TABLE ALL_EXA_TYPES (" + + " c1 varchar(100) default 'bar'," + + " c2 varchar(100) CHARACTER SET ASCII default 'bar'," + + " c3 char(10) default 'foo'," + + " c4 char(10) CHARACTER SET ASCII default 'bar'," + + " c5 decimal(5,0) default 1," + + " c6 decimal(6,3) default 1.2," + + " c7 double default 1E2," + + " c8 boolean default TRUE," + + " c9 date default '2016-06-01'," + + " c10 timestamp default '2016-06-01 00:00:01.000'," + + " c11 timestamp with local time zone default '2016-06-01 00:00:02.000'," + + " c12 interval year to month default '3-5'," + + " c13 interval day to second default '2 12:50:10.123'," + + " c14 geometry(3857) default 'POINT(2 5)'" + + ")"); + + stmt.execute("INSERT INTO " + testSchema + ".ALL_EXA_TYPES VALUES(" + + "'a茶'," + + "'b'," + + "'c茶'," + + "'d'," + + "123," + + "123.456," + + "2.2," + + "FALSE," + + "'2016-08-01'," + + "'2016-08-01 00:00:01.000'," + + "'2016-08-01 00:00:02.000'," + + "'4-6'," + + "'3 12:50:10.123'," + + "'POINT(2 5)'" + + ");"); + + stmt.execute("CREATE TABLE WITH_NULLS (c1 int, c2 varchar(100))"); + stmt.execute("INSERT INTO WITH_NULLS VALUES " + + " (1, 'a')," + + " (2, null)," + + " (3, 'b')," + + " (1, null)," + + " (null, 'c')"); + + // Create schema, table and column with mixed case identifiers (to test correct mapping, and correct sql generation of adapter) + stmt.execute("DROP SCHEMA IF EXISTS \"" + testSchemaMixedCase + "\" CASCADE"); + stmt.execute("CREATE SCHEMA \"" + testSchemaMixedCase + "\""); + stmt.execute("CREATE TABLE \"Table_Mixed_Case\" (\"Column1\" int, \"column2\" int, COLUMN3 int)"); + stmt.execute("INSERT INTO \"Table_Mixed_Case\" VALUES (1, 2, 3)"); + } + + @Test + public void testDataTypeMapping() throws SQLException { + ResultSet result = executeQuery("SELECT COLUMN_NAME, COLUMN_TYPE, COLUMN_MAXSIZE, COLUMN_NUM_PREC, COLUMN_NUM_SCALE, COLUMN_DEFAULT FROM EXA_DBA_COLUMNS WHERE COLUMN_SCHEMA = '" + virtualSchema + "' AND COLUMN_TABLE='ALL_EXA_TYPES' ORDER BY COLUMN_ORDINAL_POSITION"); + matchNextRow(result, "C1", "VARCHAR(100) UTF8", (long)100, null, null, "'bar'"); + matchNextRow(result, "C2", "VARCHAR(100) ASCII", (long)100, null, null, "'bar'"); + matchNextRow(result, "C3", "CHAR(10) UTF8", (long)10, null, null, "'foo'"); + matchNextRow(result, "C4", "CHAR(10) ASCII", (long)10, null, null, "'bar'"); + matchNextRow(result, "C5", "DECIMAL(5,0)", (long)5, (long)5, (long)0, "1"); + matchNextRow(result, "C6", "DECIMAL(6,3)", (long)6, (long)6, (long)3, "1.2"); + matchNextRow(result, "C7", "DOUBLE", (long)64, null, null, "100"); + matchNextRow(result, "C8", "BOOLEAN", (long)1, null, null, "TRUE"); + matchNextRow(result, "C9", "DATE", (long)10, null, null, "'2016-06-01'"); + matchNextRow(result, "C10", "TIMESTAMP", (long)29, null, null, "'2016-06-01 00:00:01.000'"); + matchNextRow(result, "C11", "TIMESTAMP WITH LOCAL TIME ZONE", (long)29, null, null, "'2016-06-01 00:00:02.000'"); + matchNextRow(result, "C12", "INTERVAL YEAR(2) TO MONTH", (long)13, null, null, "'3-5'"); + matchNextRow(result, "C13", "INTERVAL DAY(2) TO SECOND(3)", (long)29, null, null, "'2 12:50:10.123'"); + matchLastRow(result, "C14", "GEOMETRY(3857)", 
(long)8000000, null, null, "'POINT(2 5)'"); // srid not yet supported, so will always default to 3857 + } + + @Test + public void testDataTypeSelect() throws SQLException { + ResultSet result = executeQuery("SELECT * FROM " + virtualSchema + ".ALL_EXA_TYPES"); + matchNextRow(result, + "a茶", + "b", + "c茶 ", + "d ", + 123, + new BigDecimal("123.456"), + 2.2, + false, + getSqlDate(2016,8,1), + getSqlTimestamp(2016,8,1,0,0,1,0), + getSqlTimestamp(2016,8,1,0,0,2,0), + "+04-06", + "+03 12:50:10.123", + "POINT (2 5)"); + } + + @Test + public void testIdentifierCaseSensitivity() throws SQLException, FileNotFoundException { + ResultSet result = executeQuery("SELECT * FROM \"Table_Mixed_Case\""); + matchLastRow(result, 1L, 2L, 3L); + result = executeQuery("SELECT \"Column1\", \"column2\", COLUMN3 FROM \"Table_Mixed_Case\""); + matchLastRow(result, 1L, 2L, 3L); + result = executeQuery("SELECT \"Column1\", \"column2\", COLUMN3 FROM \"Table_Mixed_Case\""); + matchLastRow(result, 1L, 2L, 3L); + } + + @Test + public void testIdentifierCaseSensitivityException1() throws SQLException, FileNotFoundException { + thrown.expect(SQLException.class); + thrown.expectMessage("object TABLE_MIXED_CASE not found"); + executeQuery("SELECT \"Column1\", \"column2\", COLUMN3 FROM Table_Mixed_Case"); + } + + @Test + public void testIdentifierCaseSensitivityException2() throws SQLException, FileNotFoundException { + thrown.expect(SQLException.class); + thrown.expectMessage("object COLUMN1 not found"); + executeQuery("SELECT Column1, column2, COLUMN3 FROM \"Table_Mixed_Case\""); + } + + /** + * This was replaced by integration test {@link #testDataTypeMapping()}. It can be enabled temporarily for debugging. + */ + @Ignore + @Test + public void testDifferentDataTypes() throws SQLException, ClassNotFoundException, FileNotFoundException { + Statement stmt = getConnection().createStatement(); + String jdbc_adapter_test_schema = "JDBC_ADAPTER_TEST_SCHEMA"; + String sql = "DROP SCHEMA IF EXISTS " + jdbc_adapter_test_schema + " CASCADE"; + stmt.execute(sql); + sql = "CREATE SCHEMA " + jdbc_adapter_test_schema; + stmt.execute(sql); + sql = "CREATE TABLE T8(c1 boolean default TRUE, c2 char(10) default 'foo'" + + ", c3 date default '2016-06-01', c4 decimal(5,0) default 0)"; + stmt.execute(sql); + sql = "CREATE TABLE T9(c1 double default 1E2, c2 geometry default 'POINT(2 5)'" + + ", c3 interval year to month default '3-5', c4 interval day to second default '2 12:50:10.123')"; + stmt.execute(sql); + sql = "CREATE TABLE TA(c1 timestamp default '2016-06-01 00:00:01.000'" + + ", c2 timestamp with local time zone default '2016-06-01 00:00:02.000', c3 varchar(100) default 'bar')"; + stmt.execute(sql); + sql = "CREATE TABLE TB(c1 boolean default NULL, c2 char(10) default NULL" + + ", c3 date default NULL, c4 decimal(5,0) default NULL)"; + stmt.execute(sql); + sql = "CREATE TABLE TC(c1 double default NULL, c2 geometry default NULL" + + ", c3 interval year to month default NULL, c4 interval day to second default NULL)"; + stmt.execute(sql); + sql = "CREATE TABLE TD(c1 timestamp default NULL, c2 timestamp with local time zone default NULL" + + ", c3 varchar(100) default NULL)"; + stmt.execute(sql); + String[] tableNames = new String[]{"T8", "T9", "TA", "TB", "TC", "TD"}; + List tables = new ArrayList<>(Arrays.asList(tableNames)); + SqlDialects dialects = new SqlDialects(ImmutableList.of(ExasolSqlDialect.NAME)); + SchemaMetadata meta = JdbcMetadataReader.readRemoteMetadata("jdbc:exa:" + getConfig().getExasolAddress(), + 
getConfig().getExasolUser(), getConfig().getExasolPassword(), "EXA_DB", "JDBC_ADAPTER_TEST_SCHEMA", tables, dialects, ExasolSqlDialect.NAME); + if (getConfig().isDebugOn()) { + System.out.println("Meta: " + SchemaMetadataSerializer.serialize(meta).build().toString()); + } + assertNotNull(meta); + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ExasolSqlDialectTest.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ExasolSqlDialectTest.java new file mode 100644 index 000000000..acdae39d1 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ExasolSqlDialectTest.java @@ -0,0 +1,56 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.capabilities.Capabilities; +import com.exasol.adapter.capabilities.PredicateCapability; +import com.exasol.adapter.dialects.SqlDialect; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.dialects.SqlGenerationVisitor; +import com.exasol.adapter.sql.SqlNode; +import com.exasol.utils.SqlTestUtil; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class ExasolSqlDialectTest { + + @Test + public void testApplyQuoteIfNeeded() { + ExasolSqlDialect dialect = new ExasolSqlDialect(DialectTestData.getExasolDialectContext()); + // Regular Identifiers + assertEquals("A1", dialect.applyQuoteIfNeeded("A1")); + assertEquals("A_1", dialect.applyQuoteIfNeeded("A_1")); + assertEquals("A", dialect.applyQuoteIfNeeded("A")); + + // Irregular Identifiers + assertEquals("\"A_a_1\"", dialect.applyQuoteIfNeeded("A_a_1")); + assertEquals("\"1\"", dialect.applyQuoteIfNeeded("1")); + assertEquals("\"1a\"", dialect.applyQuoteIfNeeded("1a")); + assertEquals("\"a\"\"b\"", dialect.applyQuoteIfNeeded("a\"b")); + } + + @Test + public void testCapabilities() { + // Test if EXASOL dialect really has all capabilities + ExasolSqlDialect dialect = new ExasolSqlDialect(DialectTestData.getExasolDialectContext()); + Capabilities caps = dialect.getCapabilities(); + assertEquals(PredicateCapability.values().length, caps.getPredicateCapabilities().size()); + } + + @Test + public void testSqlGenerator() { + SqlNode node = DialectTestData.getTestSqlNode(); + String schemaName = "SCHEMA"; + String expectedSql = "SELECT USER_ID, COUNT(URL) FROM " + schemaName + ".CLICKS" + + " WHERE 1 < USER_ID" + + " GROUP BY USER_ID" + + " HAVING 1 < COUNT(URL)" + + " ORDER BY USER_ID" + + " LIMIT 10"; + SqlGenerationContext context = new SqlGenerationContext("", schemaName, false); + SqlDialect dialect = new ExasolSqlDialect(DialectTestData.getExasolDialectContext()); + SqlGenerationVisitor generator = dialect.getSqlGenerationVisitor(context); + String actualSql = node.accept(generator); + assertEquals(SqlTestUtil.normalizeSql(expectedSql), SqlTestUtil.normalizeSql(actualSql)); + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/GenericSqlDialectIT.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/GenericSqlDialectIT.java new file mode 100644 index 000000000..5b53f5b1a --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/GenericSqlDialectIT.java @@ -0,0 +1,56 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.dialects.AbstractIntegrationTest; +import org.junit.Assume; +import org.junit.BeforeClass; +import 
org.junit.Test; + +import java.io.FileNotFoundException; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class GenericSqlDialectIT extends AbstractIntegrationTest { + + @BeforeClass + public static void setUpClass() throws FileNotFoundException, SQLException, ClassNotFoundException { + Assume.assumeTrue(getConfig().genericTestsRequested()); + + String connectionString = getConfig().getGenericJdbcConnectionString(); + setConnection(connectToExa()); + createGenericJDBCAdapter(); + String catalogName = "jm3450"; // This only works for the database in our test environment + String schemaName = ""; + createVirtualSchema("VS_GENERIC_MYSQL", + GenericSqlDialect.NAME, + catalogName, + schemaName, + "", + getConfig().getGenericUser(), + getConfig().getGenericPassword(), + "ADAPTER.JDBC_ADAPTER", + connectionString, + false, + getConfig().debugAddress(), + ""); + } + + @Test + public void testVirtualSchema() throws SQLException, ClassNotFoundException, FileNotFoundException { + ResultSet result = executeQuery("SELECT * FROM \"customers\" ORDER BY id"); + result.next(); + assertEquals("1", result.getString(1)); + } + + private static void createGenericJDBCAdapter() throws SQLException, FileNotFoundException { + String jdbcAdapterPath = getConfig().getJdbcAdapterPath(); + String genericJdbcDriverPath = getConfig().getGenericJdbcDriverPath(); + List<String> includes = new ArrayList<>(); + includes.add(jdbcAdapterPath); + includes.add(genericJdbcDriverPath); + createJDBCAdapter(includes); + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ImpalaSqlDialectIT.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ImpalaSqlDialectIT.java new file mode 100644 index 000000000..8fcedc39a --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/ImpalaSqlDialectIT.java @@ -0,0 +1,274 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.dialects.AbstractIntegrationTest; +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.FileNotFoundException; +import java.math.BigDecimal; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +/** + * Integration test for the Impala SQL dialect + * + * Test data: sample_07: code string, description string, total_emp int, salary int + */ +public class ImpalaSqlDialectIT extends AbstractIntegrationTest { + + private static final String virtualSchema = "VS_IMPALA"; + private static final String impalaSchema = "default"; + + @BeforeClass + public static void setUpClass() throws FileNotFoundException, SQLException, ClassNotFoundException { + Assume.assumeTrue(getConfig().impalaTestsRequested()); + setConnection(connectToExa()); + + createImpalaJDBCAdapter(); + createVirtualSchema( + virtualSchema, + ImpalaSqlDialect.NAME, + "", impalaSchema, + "", + "no-user", + "no-password", + "ADAPTER.JDBC_ADAPTER", + getConfig().getImpalaJdbcConnectionString(), + false, + getConfig().debugAddress(), + "SAMPLE_07,ALL_HIVE_IMPALA_TYPES,SIMPLE,SIMPLE_WITH_NULLS"); + } + + @Test + public void testTypeMapping() throws SQLException, ClassNotFoundException, FileNotFoundException { + // TODO Test type mapping for tables with invalid Impala Types + ResultSet result = executeQuery("SELECT COLUMN_NAME, COLUMN_TYPE, COLUMN_MAXSIZE, 
COLUMN_NUM_PREC, COLUMN_NUM_SCALE, COLUMN_DEFAULT FROM EXA_DBA_COLUMNS WHERE COLUMN_SCHEMA = '" + virtualSchema + "' AND COLUMN_TABLE='ALL_HIVE_IMPALA_TYPES' ORDER BY COLUMN_ORDINAL_POSITION"); + matchNextRow(result, "C1", "DECIMAL(3,0)", 3L, 3L, 0L, null); + matchNextRow(result, "C2", "DECIMAL(5,0)", 5L, 5L, 0L, null); + matchNextRow(result, "C3", "DECIMAL(10,0)", (long)10, (long)10, (long)0, null); + matchNextRow(result, "C4", "DECIMAL(19,0)", (long)19, (long)19, (long)0, null); + matchNextRow(result, "C5", "DOUBLE", (long)64, null, null, null); + matchNextRow(result, "C6", "DOUBLE", (long)64, null, null, null); + matchNextRow(result, "C7", "DECIMAL(9,0)", (long)9, (long)9, (long)0, null); + matchNextRow(result, "C8", "DECIMAL(12,2)", (long)12, (long)12, (long)2, null); + matchNextRow(result, "C9", "VARCHAR(2000000) UTF8", (long)2000000, null, null, null); + matchNextRow(result, "C10", "TIMESTAMP", (long)29, null, null, null); + // Impala has problems with STRING data type, and probably automatically restricts it to ASCII (otherwise several operations don't work with the column) + // See http://www.cloudera.com/documentation/enterprise/5-5-x/topics/impala_string.html + matchNextRow(result, "C11", "VARCHAR(32767) ASCII", (long)32767, null, null, null); + matchNextRow(result, "C12", "VARCHAR(1000) UTF8", (long)1000, null, null, null); + matchNextRow(result, "C13", "CHAR(10) UTF8", (long)10, null, null, null); + matchLastRow(result, "C14", "BOOLEAN", (long)1, null, null, null); + } + + @Test + public void testSelectWithAllTypes() throws SQLException { + ResultSet result = executeQuery("SELECT * from " + virtualSchema + ".ALL_HIVE_IMPALA_TYPES"); + matchLastRow(result, + (short)123, + 12345, + 1234567890L, + new BigDecimal(1234567890123456789L), + 12.199999809265137, + 12.2, + "12345", // TODO Why is this string? + "12345.12", // TODO Why is this string? + "12345.12",getSqlTimestamp(1985, 9, 25, 17, 45, 30, 5), + "abc", + "varchar 茶", + "char 茶 ", // 茶 requires 3 bytes, and char(10) means 10 bytes for Impala. 
+ true); + } + + @Test + public void testSimpleQuery() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT * FROM sample_07"; + ResultSet result = executeQuery(query); + matchNextRow(result, "00-0000", "All Occupations", (long)134354250, (long)40690); + } + + @Test + public void testProjection() throws SQLException { + String query = "SELECT c2 FROM " + virtualSchema + ".ALL_HIVE_IMPALA_TYPES"; + ResultSet result = executeQuery(query); + matchLastRow(result, 12345); + matchSingleRowExplain(query, "SELECT `C2` FROM `default`.`ALL_HIVE_IMPALA_TYPES`"); + } + + @Test + public void testComparisonPredicates() throws SQLException { + // =, !=, <, <=, >, >= + String query = "select salary, salary=33880, salary!=33880, salary<33880, salary<=33880, salary>33880, salary>=33880 from " + virtualSchema + ".sample_07 where code = '11-1031'"; + ResultSet result = executeQuery(query); + matchLastRow(result, 33880L, true, false, false, true, false, true); + matchSingleRowExplain(query, "SELECT `SALARY`, `SALARY` = 33880, `SALARY` != 33880, `SALARY` < 33880, `SALARY` <= 33880, 33880 < `SALARY`, 33880 <= `SALARY` FROM `default`.`SAMPLE_07` WHERE `CODE` = '11-1031'"); + } + + @Test + public void testLogicalPredicates() throws SQLException { + // NOT, AND, OR + String query = "select * from vs_impala.simple_with_nulls where (c1 < 2 or c1 > 2) and not (c2 is null)"; + ResultSet result = executeQuery(query); + matchNextRow(result, 1L, "a"); + matchLastRow(result, 3L, "b"); + matchSingleRowExplain(query, "SELECT * FROM `default`.`SIMPLE_WITH_NULLS` WHERE ((`C1` < 2 OR 2 < `C1`) AND NOT (`C2` IS NULL))"); + } + + @Test + public void testLikePredicates() throws SQLException { + // LIKE, LIKE ESCAPE (not pushed down), REGEXP_LIKE + String query = "select code, code like 'x%1' escape 'x' from " + virtualSchema + ".sample_07 where (code like '15%' and not code regexp_like '.*1$')"; + ResultSet result = executeQuery(query); + matchNextRow(result, "15-0000", false); + matchNextRow(result, "15-1032", false); + matchNextRow(result, "15-1099", false); + matchLastRow(result, "15-2099", false); + matchSingleRowExplain(query, "SELECT `CODE` FROM `default`.`SAMPLE_07` WHERE (`CODE` LIKE '15%' AND NOT (`CODE` REGEXP '.*1$'))"); + } + + @Test + public void testMiscPredicates() throws SQLException { + // BETWEEN, IN, IS NULL, IS NOT NULL + String query = "select c1, c2, c1 in (2, 3), c2 is null, c2 is not null from vs_impala.simple_with_nulls WHERE c1 between 1 and 2"; + ResultSet result = executeQuery(query); + matchNextRow(result, 1L, "a", false, false, true); + matchNextRow(result, 2L, null, true, true, false); + matchLastRow(result, 1L, null, false, true, false); + matchSingleRowExplain(query, "SELECT `C1`, `C2`, `C1` IN (2, 3), `C2` IS NULL, `C2` IS NOT NULL FROM `default`.`SIMPLE_WITH_NULLS` WHERE `C1` BETWEEN 1 AND 2"); + } + + @Test + public void testCountSumAggregateFunction() throws SQLException { + String query = "SELECT COUNT(A), COUNT(*), COUNT(DISTINCT A), SUM(A), SUM(DISTINCT A) from vs_impala.simple"; + ResultSet result = executeQuery(query); + matchLastRow(result, new BigDecimal(6), new BigDecimal(6), new BigDecimal(3), 12D, 6D); + matchSingleRowExplain(query, "SELECT COUNT(`A`), COUNT(*), COUNT(DISTINCT `A`), CAST(SUM(`A`) AS DOUBLE), CAST(SUM(DISTINCT `A`) AS DOUBLE) FROM `default`.`SIMPLE`"); + } + + public void testAvgMinMaxAggregateFunction() throws SQLException { + String query = "SELECT AVG(C), MIN(A), MIN(DISTINCT A), MAX(A), MAX(DISTINCT A) from 
VS_IMPALA.SIMPLE"; + ResultSet result = executeQuery(query); + matchLastRow(result, new BigDecimal(3.85), new BigDecimal(1), new BigDecimal(1), new BigDecimal(3), new BigDecimal(3)); + matchSingleRowExplain(query, "SELECT AVG(`C`), MIN(`A`), MIN(`A`), MAX(`A`), MAX(`A`) FROM `default`.`SIMPLE`"); + } + + @Test + public void testLiteralsPredicates() throws SQLException { + // String/varchar, bool, null, double, decimal + String query = "select count(*) from vs_impala.ALL_HIVE_IMPALA_TYPES where c11 = 'abc' and c12 = 'varchar 茶' and c6 = 1.22E1 and c8 = 12345.12"; + ResultSet result = executeQuery(query); + matchLastRow(result, new BigDecimal(1)); + matchSingleRowExplain(query, "SELECT COUNT(*) FROM `default`.`ALL_HIVE_IMPALA_TYPES` WHERE (`C11` = 'abc' AND `C12` = 'varchar 茶' AND `C6` = 12.2 AND `C8` = 12345.12)"); + } + + @Test + public void testAggregationFunctions() throws SQLException { + /** + * COUNT(A) + COUNT(*) + COUNT(DISTINCT A) + COUNT(ALL (A, C)) + COVAR_POP(A, C) + COVAR_SAMP(A, C) + FIRST_VALUE(A) + GROUP_CONCAT(A) + GROUP_CONCAT(DISTINCT A) + GROUP_CONCAT(A ORDER BY C) + GROUP_CONCAT(A ORDER BY C DESC) + GROUP_CONCAT(A SEPARATOR + GROUPING(A) + GROUPING(A, C) + GROUPING_ID(A) + GROUPING_ID(A, C) + LAST_VALUE(A) + MAX(A) + MAX(ALL A) + MAX(DISTINCT A) + MEDIAN(A) + MIN(A) + MIN(ALL A) + MIN(DISTINCT A) + PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY A) + PERCENTILE_DISC(0.5) WITHIN GROUP (ORDER BY A) + REGR_AVGX(A, C) + REGR_AVGY(A, C) + REGR_COUNT(A, C) + REGR_INTERCEPT(A, C) + REGR_R2(A, C) + REGR_SLOPE(A, C) + REGR_SXX(A, C) + REGR_SXY(A, C) + REGR_SYY(A, C) + STDDEV(A) + STDDEV(ALL A) + STDDEV(DISTINCT A) + STDDEV_POP(A) + STDDEV_POP(ALL A) + STDDEV_POP(DISTINCT A) + STDDEV_SAMP(A) + STDDEV_SAMP(ALL A) + STDDEV_SAMP(DISTINCT A) + SUM(A) + SUM(ALL A) + SUM(DISTINCT A) + VAR_POP(A) + VAR_POP(ALL A) + VAR_POP(DISTINCT A) + VAR_SAMP(A) + VAR_SAMP(ALL A) + VAR_SAMP(DISTINCT A) + VARIANCE(A) + VARIANCE(ALL A) + VARIANCE(DISTINCT A) + */ + } + + @Test + public void testOrderBy() throws SQLException { + String query = "SELECT CODE, SALARY from sample_07 ORDER BY SALARY"; + ResultSet result = executeQuery(query); + matchNextRow(result, "35-3021", 16700L); + matchSingleRowExplain(query, "SELECT `CODE`, `SALARY` FROM `default`.`SAMPLE_07` ORDER BY `SALARY`"); + } + + @Test + public void testOrderByLimit() throws SQLException { + String query = "SELECT CODE, SALARY from sample_07 ORDER BY SALARY LIMIT 3"; + ResultSet result = executeQuery(query); + matchNextRow(result, "35-3021", 16700L); + matchNextRow(result, "35-2011", 16860L); + matchLastRow(result, "35-9021", 17060L); + matchSingleRowExplain(query, "SELECT `CODE`, `SALARY` FROM `default`.`SAMPLE_07` ORDER BY `SALARY` LIMIT 3"); + } + + @Test + public void testOrderByLimitOffset() throws SQLException { + String query = "SELECT CODE, SALARY from sample_07 ORDER BY SALARY LIMIT 2 OFFSET 1"; + ResultSet result = executeQuery(query); + matchNextRow(result, "35-2011", 16860L); + matchLastRow(result, "35-9021", 17060L); + matchSingleRowExplain(query, "SELECT `CODE`, `SALARY` FROM `default`.`SAMPLE_07` ORDER BY `SALARY` LIMIT 2 OFFSET 1"); + } + + @Test + public void testAggregateFunctions() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT count(*), count(salary), count(distinct salary) FROM sample_07"; + ResultSet result = executeQuery(query); + matchLastRow(result, new BigDecimal(823), new BigDecimal(819), new BigDecimal(759)); + matchSingleRowExplain(query, "SELECT COUNT(*), COUNT(`SALARY`), 
COUNT(DISTINCT `SALARY`) FROM `default`.`SAMPLE_07`"); + } + + private static void createImpalaJDBCAdapter() throws SQLException, FileNotFoundException { + List impalaIncludes = new ArrayList<>(); + impalaIncludes.add(getConfig().getJdbcAdapterPath()); + String jdbcPrefixPath = getConfig().getImpalaJdbcPrefixPath(); + for (String jar : getConfig().getImpalaJdbcJars()) { + impalaIncludes.add(jdbcPrefixPath + jar); + } + createJDBCAdapter(impalaIncludes); + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/KerberosIT.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/KerberosIT.java new file mode 100644 index 000000000..e6e58a866 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/KerberosIT.java @@ -0,0 +1,105 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.dialects.AbstractIntegrationTest; +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.FileNotFoundException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +/** + * Integration test for JDBC drivers requiring Kerberos authentication. This is currently only tested for the Cloudera Hive JDBC driver developed by Simba (probably also works for the Cloudera Impala Driver developed by Simba) + */ +public class KerberosIT extends AbstractIntegrationTest { + + private static final String virtualSchema = "VS_KERBEROS_IT"; + private static final String connectionName = "krb_conn"; + + @BeforeClass + public static void setUpClass() throws FileNotFoundException, SQLException, ClassNotFoundException { + Assume.assumeTrue(getConfig().kerberosTestsRequested()); + final String kerberosConnectionString = getConfig().getKerberosJdbcConnectionString(); + final String kerberosUser = getConfig().getKerberosUser(); + final String kerberosPassword = getConfig().getKerberosPassword(); + setConnection(connectToExa()); + createKerberosJDBCAdapter(); + final String connectionName = "krb_conn"; + createConnection(connectionName, kerberosConnectionString, kerberosUser, kerberosPassword); + } + + @Test + public void testKerberosVirtualSchema() throws SQLException, ClassNotFoundException, FileNotFoundException { + createVirtualSchema( + virtualSchema, + ExasolSqlDialect.NAME, + "", + "default", + connectionName, + "", + "", + "ADAPTER.JDBC_ADAPTER", + "", false, + getConfig().debugAddress(), + ""); + Statement stmt = getConnection().createStatement(); + ResultSet result = stmt.executeQuery("SELECT * FROM \"sample_07\""); + result.next(); + assertEquals("00-0000", result.getString(1)); + } + + @Test + public void testKerberosVirtualSchemaGrantConnection() throws SQLException, ClassNotFoundException, FileNotFoundException { + // Create Kerberos Virtual Schema using a different user, which has the appropriate privileges for the connection + final String userName = "user1"; + Statement stmt = getConnection().createStatement(); + stmt.execute("DROP USER IF EXISTS " + userName +" CASCADE"); + stmt.execute("CREATE USER " + userName +" identified by \"" + userName + "\""); + stmt.execute("GRANT CREATE SESSION TO " + userName); + stmt.execute("GRANT CREATE VIRTUAL SCHEMA TO " + userName); + stmt.execute("GRANT DROP ANY VIRTUAL SCHEMA TO " + userName); + final String adapterName = 
"ADAPTER.JDBC_ADAPTER"; + stmt.execute("GRANT EXECUTE ON " + adapterName + " TO " + userName); + stmt.execute("GRANT ACCESS ON CONNECTION " + connectionName + " TO " + userName); + stmt.execute("GRANT CONNECTION " + connectionName + " TO " + userName); + stmt.execute("COMMIT"); + Connection conn2 = connectToExa(userName, userName); + Statement stmt2 = conn2.createStatement(); + createVirtualSchema( + conn2, + virtualSchema, + ExasolSqlDialect.NAME, + "", + "default", + connectionName, + "", + "", + adapterName, + "", false, + getConfig().debugAddress(), + ""); + ResultSet result = stmt2.executeQuery("SELECT * FROM \"sample_07\""); + result.next(); + assertEquals("00-0000", result.getString(1)); + stmt.execute("DROP USER IF EXISTS " + userName +" CASCADE"); + + } + + private static void createKerberosJDBCAdapter() throws SQLException, FileNotFoundException { + List kerberosIncludes = new ArrayList<>(); + kerberosIncludes.add(getConfig().getJdbcAdapterPath()); + String jdbcPrefixPath = getConfig().getKerberosJdbcPrefixPath(); + for (String jar : getConfig().getKerberosJdbcJars()) { + kerberosIncludes.add(jdbcPrefixPath + jar); + } + createJDBCAdapter(kerberosIncludes); + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/OracleSqlDialectIT.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/OracleSqlDialectIT.java new file mode 100644 index 000000000..7189536ea --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/OracleSqlDialectIT.java @@ -0,0 +1,169 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.dialects.AbstractIntegrationTest; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; + +import java.io.FileNotFoundException; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +/** + * Tested with Oracle 12 + * + * TODO Add tests for data types + * TODO Test Expanding of SELECT * if elements of select list require casting + */ +public class OracleSqlDialectIT extends AbstractIntegrationTest { + + private static final String virtualSchema = "VS_ORACLE"; + private static final String oracleSchema = "C##LOADER"; + + @Before + public void beforeMethod() throws FileNotFoundException, SQLException, ClassNotFoundException { + Assume.assumeTrue(getConfig().oracleTestsRequested()); + setConnection(connectToExa()); + createOracleJDBCAdapter(); + createVirtualSchema( + virtualSchema, + OracleSqlDialect.NAME, + "", oracleSchema, + "", + "C##LOADER", + "loader", + "ADAPTER.JDBC_ADAPTER", + getConfig().getOracleJdbcConnectionString(), + false, + getConfig().debugAddress(), + "ALL_TYPES"); + } + + @Test + public void testVirtualSchema() throws SQLException, ClassNotFoundException, FileNotFoundException { + ResultSet result = executeQuery("SELECT C3 FROM ALL_TYPES"); + result.next(); + assertEquals("cccccccccccccccccccc", result.getString(1)); + } + + @Test + public void testSelectProjection() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C7 FROM ALL_TYPES"; + ResultSet result = executeQuery(query); + matchNextRow(result, new BigDecimal("12345.12345")); + matchNextRow(result, new BigDecimal("12355.12345")); + matchSingleRowExplain(query, "SELECT C7 FROM 
\"C##LOADER\".ALL_TYPES"); + } + + @Test + public void testSelectExpression() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C7 + 1 FROM ALL_TYPES"; + ResultSet result = executeQuery(query); + matchNextRow(result, "12346.12345"); + matchNextRow(result, "12356.12345"); + matchSingleRowExplain(query, "SELECT CAST((C7 + 1) AS FLOAT) FROM \"C##LOADER\".ALL_TYPES"); + } + + @Test + public void testFilterExpression() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C7 FROM ALL_TYPES WHERE C7 > 12346"; + ResultSet result = executeQuery(query); + matchNextRow(result, new BigDecimal("12355.12345")); + matchSingleRowExplain(query, "SELECT C7 FROM \"C##LOADER\".ALL_TYPES WHERE 12346 < C7"); + } + + @Test + public void testAggregateSingleGroup() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT min(C7) FROM ALL_TYPES"; + ResultSet result = executeQuery(query); + matchNextRow(result, "12345.12345"); + matchSingleRowExplain(query, "SELECT CAST(MIN(C7) AS FLOAT) FROM \"C##LOADER\".ALL_TYPES"); + } + + @Test + public void testAggregateGroupByColumn() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C5, min(C7) FROM ALL_TYPES GROUP BY C5"; + ResultSet result = executeQuery(query); + matchNextRow(result, "12345678901234567890", "12345.12345"); + matchNextRow(result, "1234567890.123456789", "12355.12345"); + matchSingleRowExplain(query, "SELECT TO_CHAR(C5), CAST(MIN(C7) AS FLOAT) FROM \"C##LOADER\".ALL_TYPES GROUP BY C5"); + } + + @Test + public void testAggregateGroupByExpression() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C5 + 1, min(C7) FROM ALL_TYPES GROUP BY C5 + 1"; + ResultSet result = executeQuery(query); + matchNextRow(result, "12345678901234567891", "12345.12345"); + matchNextRow(result, "1234567891.123456789", "12355.12345"); + matchSingleRowExplain(query, "SELECT CAST((C5 + 1) AS FLOAT), CAST(MIN(C7) AS FLOAT) FROM \"C##LOADER\".ALL_TYPES GROUP BY (C5 + 1)"); + } + + @Test + public void testAggregateGroupByTuple() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C3, C5, min(C7) FROM ALL_TYPES GROUP BY C3, C5 ORDER BY C5 DESC"; + ResultSet result = executeQuery(query); + matchNextRow(result, "cccccccccccccccccccc", "12345678901234567890", "12345.12345"); + matchNextRow(result, "cccccccccccccccccccc", "1234567890.123456789", "12355.12345"); + matchSingleRowExplain(query, "SELECT C3, TO_CHAR(C5), CAST(MIN(C7) AS FLOAT) FROM \"C##LOADER\".ALL_TYPES GROUP BY C3, C5 ORDER BY C5 DESC"); + } + + @Test + public void testAggregateHaving() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C5, min(C7) FROM ALL_TYPES GROUP BY C5 HAVING MIN(C7) > 12350"; + ResultSet result = executeQuery(query); + matchNextRow(result, "1234567890.123456789", "12355.12345"); + matchSingleRowExplain(query, "SELECT TO_CHAR(C5), CAST(MIN(C7) AS FLOAT) FROM \"C##LOADER\".ALL_TYPES GROUP BY C5 HAVING 12350 < MIN(C7)"); + } + + @Test + public void testOrderByColumn() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C1 FROM ALL_TYPES ORDER BY C1 DESC NULLS LAST"; + ResultSet result = executeQuery(query); + matchNextRow(result, "aaaaaaaaaaaaaaaaaaaa "); + matchNextRow(result, (Object)null); + matchSingleRowExplain(query, "SELECT C1 FROM \"C##LOADER\".ALL_TYPES ORDER BY C1 
DESC NULLS LAST"); + } + + @Test + public void testOrderByExpression() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C7 FROM ALL_TYPES ORDER BY ABS(C7) DESC NULLS FIRST"; + ResultSet result = executeQuery(query); + matchNextRow(result, new BigDecimal("12355.12345")); + matchNextRow(result, new BigDecimal("12345.12345")); + matchSingleRowExplain(query, "SELECT C7 FROM \"C##LOADER\".ALL_TYPES ORDER BY ABS(C7) DESC"); + } + + @Test + public void testLimit() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C7 FROM ALL_TYPES ORDER BY C7 LIMIT 2"; + ResultSet result = executeQuery(query); + matchNextRow(result, new BigDecimal("12345.12345")); + matchNextRow(result, new BigDecimal("12355.12345")); + matchSingleRowExplain(query, "SELECT LIMIT_SUBSELECT.* FROM ( SELECT C7 FROM \"C##LOADER\".ALL_TYPES ORDER BY C7 ) LIMIT_SUBSELECT WHERE ROWNUM <= 2"); + } + + @Test + public void testLimitOffset() throws SQLException, ClassNotFoundException, FileNotFoundException { + String query = "SELECT C7 FROM ALL_TYPES ORDER BY C7 LIMIT 1 OFFSET 1"; + ResultSet result = executeQuery(query); + matchNextRow(result, new BigDecimal("12355.12345")); + matchSingleRowExplain(query, "SELECT c0 FROM ( SELECT LIMIT_SUBSELECT.*, ROWNUM ROWNUM_SUB FROM ( SELECT C7 AS c0 FROM \"C##LOADER\".ALL_TYPES ORDER BY C7 ) LIMIT_SUBSELECT WHERE ROWNUM <= 2 ) WHERE ROWNUM_SUB > 1"); + } + + private static void createOracleJDBCAdapter() throws SQLException, FileNotFoundException { + String jdbcAdapterPath = getConfig().getJdbcAdapterPath(); + String oracleJdbcDriverdbcDriver = getConfig().getOracleJdbcDriverPath(); + List includes = new ArrayList<>(); + includes.add(jdbcAdapterPath); + includes.add(oracleJdbcDriverdbcDriver); + createJDBCAdapter(includes); + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/OracleSqlDialectTest.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/OracleSqlDialectTest.java new file mode 100644 index 000000000..8cd7a39c8 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/dialects/impl/OracleSqlDialectTest.java @@ -0,0 +1,86 @@ +package com.exasol.adapter.dialects.impl; + +import com.exasol.adapter.dialects.SqlDialect; +import com.exasol.adapter.dialects.SqlDialectContext; +import com.exasol.adapter.dialects.SqlGenerationContext; +import com.exasol.adapter.dialects.SqlGenerationVisitor; +import com.exasol.adapter.jdbc.SchemaAdapterNotes; +import com.exasol.adapter.sql.SqlNode; +import com.exasol.adapter.sql.SqlSelectList; +import com.exasol.adapter.sql.SqlStatementSelect; +import com.exasol.utils.SqlTestUtil; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.junit.Assert.assertEquals; + + +public class OracleSqlDialectTest { + + @Test + public void testSqlGeneratorWithLimit() { + SqlNode node = DialectTestData.getTestSqlNode(); + String schemaName = "SCHEMA"; + String expectedSql = "SELECT LIMIT_SUBSELECT.* FROM ( " + + " SELECT USER_ID, COUNT(URL) " + + " FROM SCHEMA.CLICKS" + + " WHERE 1 < USER_ID" + + " GROUP BY USER_ID" + + " HAVING 1 < COUNT(URL)" + + " ORDER BY USER_ID " + + ") LIMIT_SUBSELECT WHERE ROWNUM <= 10" ; + SqlGenerationContext context = new SqlGenerationContext("", schemaName, false); + SqlDialectContext dialectContext = new SqlDialectContext(Mockito.mock(SchemaAdapterNotes.class)); + SqlDialect dialect = new 
+        SqlGenerationVisitor generator = dialect.getSqlGenerationVisitor(context);
+        String actualSql = node.accept(generator);
+        assertEquals(SqlTestUtil.normalizeSql(expectedSql), SqlTestUtil.normalizeSql(actualSql));
+    }
+
+    @Test
+    public void testSqlGeneratorWithLimitOffset() {
+        SqlNode node = DialectTestData.getTestSqlNode();
+        ((SqlStatementSelect)node).getLimit().setOffset(5);
+        String schemaName = "SCHEMA";
+        String expectedSql = "SELECT c0, c1 FROM (" +
+                " SELECT LIMIT_SUBSELECT.*, ROWNUM ROWNUM_SUB FROM ( " +
+                " SELECT USER_ID AS c0, COUNT(URL) AS c1 " +
+                " FROM SCHEMA.CLICKS" +
+                " WHERE 1 < USER_ID" +
+                " GROUP BY USER_ID" +
+                " HAVING 1 < COUNT(URL)" +
+                " ORDER BY USER_ID" +
+                " ) LIMIT_SUBSELECT WHERE ROWNUM <= 15 " +
+                ") WHERE ROWNUM_SUB > 5";
+        SqlGenerationContext context = new SqlGenerationContext("", schemaName, false);
+        SqlDialectContext dialectContext = new SqlDialectContext(Mockito.mock(SchemaAdapterNotes.class));
+        SqlDialect dialect = new OracleSqlDialect(dialectContext);
+        SqlGenerationVisitor generator = dialect.getSqlGenerationVisitor(context);
+        String actualSql = node.accept(generator);
+        assertEquals(SqlTestUtil.normalizeSql(expectedSql), SqlTestUtil.normalizeSql(actualSql));
+    }
+
+    @Test
+    public void testSqlGeneratorWithSelectStarAndOffset() {
+        SqlStatementSelect node = (SqlStatementSelect) DialectTestData.getTestSqlNode();
+        node.getLimit().setOffset(5);
+        node = new SqlStatementSelect(node.getFromClause(), new SqlSelectList(), node.getWhereClause(), node.getGroupBy(), node.getHaving(), node.getOrderBy(), node.getLimit());
+        String schemaName = "SCHEMA";
+        String expectedSql = "SELECT c0, c1 FROM (" +
+                " SELECT LIMIT_SUBSELECT.*, ROWNUM ROWNUM_SUB FROM ( " +
+                " SELECT USER_ID AS c0, URL AS c1 " +
+                " FROM SCHEMA.CLICKS" +
+                " WHERE 1 < USER_ID" +
+                " GROUP BY USER_ID" +
+                " HAVING 1 < COUNT(URL)" +
+                " ORDER BY USER_ID" +
+                " ) LIMIT_SUBSELECT WHERE ROWNUM <= 15 " +
+                ") WHERE ROWNUM_SUB > 5";
+        SqlGenerationContext context = new SqlGenerationContext("", schemaName, false);
+        SqlDialectContext dialectContext = new SqlDialectContext(Mockito.mock(SchemaAdapterNotes.class));
+        SqlDialect dialect = new OracleSqlDialect(dialectContext);
+        SqlGenerationVisitor generator = dialect.getSqlGenerationVisitor(context);
+        String actualSql = node.accept(generator);
+        assertEquals(SqlTestUtil.normalizeSql(expectedSql), SqlTestUtil.normalizeSql(actualSql));
+    }
+}
diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/jdbc/JdbcAdapterPropertiesTest.java b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/jdbc/JdbcAdapterPropertiesTest.java
new file mode 100644
index 000000000..cefb711ef
--- /dev/null
+++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/java/com/exasol/adapter/jdbc/JdbcAdapterPropertiesTest.java
@@ -0,0 +1,212 @@
+package com.exasol.adapter.jdbc;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class JdbcAdapterPropertiesTest {
+
+    @Rule
+    public ExpectedException thrown = ExpectedException.none();
+
+    private static Map<String, String> getMinimumMandatory() {
+        Map<String, String> properties = new HashMap<>();
+        properties.put(JdbcAdapterProperties.PROP_SQL_DIALECT, "GENERIC");
+        properties.put(JdbcAdapterProperties.PROP_CONNECTION_NAME, "MY_CONN");
+        return properties;
+    }
+
+    @Test
+    public void testNoCredentials() {
+        Map<String, String> properties = new HashMap<>();
+        properties.put(JdbcAdapterProperties.PROP_SQL_DIALECT, "GENERIC");
+        properties.put(JdbcAdapterProperties.PROP_SCHEMA_NAME, "MY_SCHEMA");
+        thrown.expect(RuntimeException.class);
+        thrown.expectMessage("You did not specify a connection (CONNECTION_NAME) and therefore have to specify");
+        JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects);
+    }
+
+    @Test
+    public void testUserNamePasswordOptional() {
+        Map<String, String> properties = new HashMap<>();
+        properties.put(JdbcAdapterProperties.PROP_SQL_DIALECT, "GENERIC");
+        properties.put(JdbcAdapterProperties.PROP_CONNECTION_STRING, "MY_CONN");
+        JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects);
+    }
+
+    // One test method per redundant-credentials scenario: with the ExpectedException
+    // rule a test ends as soon as the expected exception is thrown, so scenarios
+    // after the first one in the same method would never run.
+    @Test
+    public void testRedundantCredentialsConnectionString() {
+        Map<String, String> properties = getMinimumMandatory();
+        properties.put(JdbcAdapterProperties.PROP_CONNECTION_STRING, "MY_CONN");
+        thrown.expect(RuntimeException.class);
+        thrown.expectMessage("You specified a connection (CONNECTION_NAME) and therefore may not specify ");
+        JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects);
+    }
+
+    @Test
+    public void testRedundantCredentialsUsername() {
+        Map<String, String> properties = getMinimumMandatory();
+        properties.put(JdbcAdapterProperties.PROP_USERNAME, "MY_USER");
+        thrown.expect(RuntimeException.class);
+        thrown.expectMessage("You specified a connection (CONNECTION_NAME) and therefore may not specify ");
+        JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects);
+    }
+
+    @Test
+    public void testRedundantCredentialsPassword() {
+        Map<String, String> properties = getMinimumMandatory();
+        properties.put(JdbcAdapterProperties.PROP_PASSWORD, "MY_PASSWORD");
+        thrown.expect(RuntimeException.class);
+        thrown.expectMessage("You specified a connection (CONNECTION_NAME) and therefore may not specify ");
+        JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects);
+    }
+
+    @Test
+    public void testNoDialect() {
+        Map<String, String> properties = new HashMap<>();
+        properties.put(JdbcAdapterProperties.PROP_CONNECTION_NAME, "MY_CONN");
+        properties.put(JdbcAdapterProperties.PROP_SCHEMA_NAME, "MY_SCHEMA");
+        thrown.expect(RuntimeException.class);
+        thrown.expectMessage("You have to specify the SQL dialect");
+        JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects);
+    }
+
+    @Test
+    public void testInvalidDialect() {
+        Map<String, String> properties = new HashMap<>();
+        properties.put(JdbcAdapterProperties.PROP_CONNECTION_NAME, "MY_CONN");
+        properties.put(JdbcAdapterProperties.PROP_SCHEMA_NAME, "MY_SCHEMA");
+        properties.put(JdbcAdapterProperties.PROP_SQL_DIALECT, "INVALID_DIALECT");
+        thrown.expect(RuntimeException.class);
+        thrown.expectMessage("SQL Dialect not supported");
+        JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects);
+    }
+
+    @Test
+    public void testInvalidDebugAddress1() {
+        Map<String, String> properties = getMinimumMandatory();
+        properties.put(JdbcAdapterProperties.PROP_DEBUG_ADDRESS, "bla");
+        thrown.expect(RuntimeException.class);
+        thrown.expectMessage("You specified an invalid hostname and port");
+        JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects);
+    }
+
+    @Test
+    public void testInvalidDebugAddress2() {
+        Map<String, String> properties = getMinimumMandatory();
+        properties.put(JdbcAdapterProperties.PROP_DEBUG_ADDRESS, "bla:no-number");
+        thrown.expect(RuntimeException.class);
+        thrown.expectMessage("You specified an invalid hostname and port");
hostname and port"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + } + + @Test + public void testInvalidDebugAddress3() { + Map properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_DEBUG_ADDRESS, "bla:123:456"); + thrown.expect(RuntimeException.class); + thrown.expectMessage("You specified an invalid hostname and port"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + } + + @Test + public void testValidDebugAddress() { + Map properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_DEBUG_ADDRESS, "bla:123"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + } + + @Test + public void testSchemaAndCatalogOptional() { + Map properties = new HashMap<>(); + properties.put(JdbcAdapterProperties.PROP_SQL_DIALECT, "GENERIC"); + properties.put(JdbcAdapterProperties.PROP_CONNECTION_NAME, "MY_CONN"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + } + + @Test + public void checkValidBoolOptions() { + Map properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_IS_LOCAL, "TrUe"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + + properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_IS_LOCAL, "FalSe"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + + properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_IMPORT_FROM_EXA, "TrUe"); + properties.put(JdbcAdapterProperties.PROP_EXA_CONNECTION_STRING, "localhost:5555"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + + properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_IMPORT_FROM_EXA, "FalSe"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + } + + @Test + public void checkInvalidBoolOption() { + Map properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_IS_LOCAL, "asdasd"); + thrown.expect(RuntimeException.class); + thrown.expectMessage("The value 'asdasd' for the property IS_LOCAL is invalid. It has to be either 'true' or 'false' (case insensitive)"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + + properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_IMPORT_FROM_EXA, "asdasd"); + properties.put(JdbcAdapterProperties.PROP_EXA_CONNECTION_STRING, "localhost:5555"); + thrown.expect(RuntimeException.class); + thrown.expectMessage("The value 'asdasd' for the property IMPORT_FROM_EXA is invalid. 
It has to be either 'true' or 'false' (case insensitive)"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + } + + @Test + public void testInconsistentExaProperties() { + Map properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_EXA_CONNECTION_STRING, "localhost:5555"); + thrown.expect(RuntimeException.class); + thrown.expectMessage("You defined the property EXA_CONNECTION_STRING without setting IMPORT_FROM_EXA"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + } + + @Test + public void testInvalidExaProperties2() { + Map properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_IMPORT_FROM_EXA, "True"); + + thrown.expect(RuntimeException.class); + thrown.expectMessage("You defined the property IMPORT_FROM_EXA, please also define EXA_CONNECTION_STRING"); + JdbcAdapterProperties.checkPropertyConsistency(properties, JdbcAdapter.supportedDialects); + } + + @Test + public void testGetTableFilters() { + Map properties = getMinimumMandatory(); + properties.put(JdbcAdapterProperties.PROP_TABLES, "T1, T2,T3,t4"); + List expectedTables = ImmutableList.of("T1", "T2", "T3", "t4"); + assertEquals(expectedTables, JdbcAdapterProperties.getTableFilter(properties)); + } + + @Test + public void testGetNewSchemaMetadata() { + Map oldSchemaProperties = new HashMap(); + oldSchemaProperties.put("EXISTING_PROP_1", "Old Value 1"); + oldSchemaProperties.put("EXISTING_PROP_2", "Old Value 2"); + + Map changedProperties = new HashMap(); + changedProperties.put("EXISTING_PROP_1", "New Value"); + changedProperties.put("EXISTING_PROP_2", null); + changedProperties.put("NEW_PROP", "VAL2"); + changedProperties.put("DELETED_PROP_NON_EXISTING", null); + + Map expectedChangedProperties = new HashMap(); + expectedChangedProperties.put("EXISTING_PROP_1", "New Value"); + expectedChangedProperties.put("NEW_PROP", "VAL2"); + + assertEquals(expectedChangedProperties, JdbcAdapterProperties.getNewProperties(oldSchemaProperties, changedProperties)); + } + +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/resources/pushdown_request.json b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/resources/pushdown_request.json new file mode 100644 index 000000000..19a5b664d --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/resources/pushdown_request.json @@ -0,0 +1,154 @@ +{ + "type": "pushdown", + "pushdownRequest": { + "type" : "select", + "aggregationType" : "group_by", + "from" : + { + "type" : "table", + "name" : "CLICKS" + }, + "selectList" : + [ + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + }, + { + "type" : "function_aggregate", + "name" : "count", + "arguments" : + [ + { + "type" : "column", + "name" : "URL", + "columnNr" : 2, + "tableName" : "CLICKS" + } + ] + } + ], + "filter" : + { + "type" : "predicate_less", + "left" : + { + "type" : "literal_exactnumeric", + "value" : "1" + }, + "right" : + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + } + }, + "groupBy" : + [ + { + "type" : "column", + "name" : "USER_ID", + "columnNr" : 1, + "tableName" : "CLICKS" + } + ], + "having" : + { + "type" : "predicate_less", + "left" : + { + "type" : "literal_exactnumeric", + "value" : "1" + }, + "right" : + { + "type" : "function_aggregate", + "name" : "count", + "arguments" : + [ + { + "type" : "column", + "name" : "URL", + "columnNr" : 2, + "tableName" : "CLICKS" + } + ] + } + }, + 
"orderBy" : + [ + { + "type" : "order_by_element", + "expression" : + { + "columnNr" : 1, + "name" : "USER_ID", + "tableName" : "CLICKS", + "type" : "column" + }, + "isAscending" : true, + "nullsLast" : true + } + ], + "limit" : + { + "numElements" : 10 + } + }, + "involvedTables": [ + { + "name" : "CLICKS", + "columns" : + [ + { + "name" : "ID", + "dataType" : + { + "precision" : 22, + "scale" : 0, + "type" : "DECIMAL" + } + }, + { + "name" : "USER_ID", + "dataType" : + { + "precision" : 18, + "scale" : 0, + "type" : "DECIMAL" + } + }, + { + "name" : "URL", + "dataType" : + { + "size" : 1000, + "type" : "VARCHAR" + } + }, + { + "name" : "REQUEST_TIME", + "dataType" : + { + "type" : "TIMESTAMP" + } + } + ] + } + ], + "schemaMetadataInfo": { + "name": "MY_HIVE_VSCHEMA", + "adapterNotes": { + "lastRefreshed": "2015-03-01 12:10:01", + "key": "Any custom schema state here" + }, + "properties": { + "HIVE_SERVER": "my-hive-server", + "HIVE_DB": "my-hive-db", + "HIVE_USER": "my-hive-user" + } + } +} diff --git a/jdbc-adapter/virtualschema-jdbc-adapter/src/test/resources/pushdown_request_alltypes.json b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/resources/pushdown_request_alltypes.json new file mode 100644 index 000000000..2c146e9e1 --- /dev/null +++ b/jdbc-adapter/virtualschema-jdbc-adapter/src/test/resources/pushdown_request_alltypes.json @@ -0,0 +1,154 @@ +{ + "type": "pushdown", + "pushdownRequest": { + "type" : "select", + "from" : + { + "type" : "table", + "name" : "T1" + } + }, + "involvedTables": [ + { + "name": "T1", + "columns": [ + { + "name": "C_DECIMAL", + "dataType": { + "type": "DECIMAL", + "precision": 18, + "scale": 2 + } + }, + { + "name": "C_DOUBLE", + "dataType": { + "type": "DOUBLE" + } + }, + { + "name": "C_VARCHAR_UTF8_1", + "dataType": { + "type": "VARCHAR", + "size": 10000, + "characterSet": "UTF8" + } + }, + { + "name": "C_VARCHAR_UTF8_2", + "dataType": { + "type": "VARCHAR", + "size": 10000 + } + }, + { + "name": "C_VARCHAR_ASCII", + "dataType": { + "type": "VARCHAR", + "size": 10000, + "characterSet": "ASCII" + } + }, + { + "name": "C_CHAR_UTF8_1", + "dataType": { + "type": "CHAR", + "size": 3 + } + }, + { + "name": "C_CHAR_UTF8_2", + "dataType": { + "type": "CHAR", + "size": 3, + "characterSet": "UTF8" + } + }, + { + "name": "C_CHAR_ASCII", + "dataType": { + "type": "CHAR", + "size": 3, + "characterSet": "ASCII" + } + }, + { + "name": "C_DATE", + "dataType": { + "type": "DATE" + } + }, + { + "name": "C_TIMESTAMP_1", + "dataType": { + "type": "TIMESTAMP" + } + }, + { + "name": "C_TIMESTAMP_2", + "dataType": { + "type": "TIMESTAMP", + "withLocalTimeZone": false + } + }, + { + "name": "C_TIMESTAMP_3", + "dataType": { + "type": "TIMESTAMP", + "withLocalTimeZone": true + } + }, + { + "name": "C_BOOLEAN", + "dataType": { + "type": "BOOLEAN" + } + }, + { + "name": "C_GEOMETRY", + "dataType": { + "type": "GEOMETRY", + "srid": 1 + } + }, + { + "name": "C_INTERVAL_DS_1", + "dataType": { + "type": "INTERVAL", + "fromTo": "DAY TO SECONDS" + } + }, + { + "name": "C_INTERVAL_DS_2", + "dataType": { + "type": "INTERVAL", + "fromTo": "DAY TO SECONDS", + "precision": 3, + "fraction": 4 + } + }, + { + "name": "C_INTERVAL_YM_1", + "dataType": { + "type": "INTERVAL", + "fromTo": "YEAR TO MONTH" + } + }, + { + "name": "C_INTERVAL_YM_2", + "dataType": { + "type": "INTERVAL", + "fromTo": "YEAR TO MONTH", + "precision": 3 + } + } + ] + } + ], + "schemaMetadataInfo": { + "name": "VS", + "adapterNotes": "", + "properties": { + } + } +} diff --git a/python-redis-demo-adapter/README.md 
b/python-redis-demo-adapter/README.md
new file mode 100644
index 000000000..3eae250c5
--- /dev/null
+++ b/python-redis-demo-adapter/README.md
@@ -0,0 +1,54 @@
+# Python Redis Adapter
+This Virtual Schema Adapter exposes the key value store [Redis](http://redis.io/) as a virtual table.
+It is a very simple Adapter for demonstration purposes, but it still tries to address a real use case.
+
+You can jump directly into the Adapter code if you like: [python-redis-adapter.sql](https://github.com/EXASOL/virtual-schema-jdbc-adapter/blob/master/python-redis-adapter/python-redis-adapter.sql)
+
+This Adapter aims to demonstrate
+* how to write a simple Adapter with fewer than 100 lines of code
+* how to use UDFs for pushdown
+* how you can map a NoSQL data source to a virtual schema
+* how you can control the pushdown behaviour via properties
+
+This Adapter is NOT meant
+* to show how to write a good and stable Adapter for Redis
+* to be used in production
+
+## How to use
+First start Redis and make sure that all EXASolution nodes can access it.
+
+Then run all statements in the [python-redis-adapter.sql](https://github.com/EXASOL/virtual-schema-jdbc-adapter/blob/master/python-redis-adapter/python-redis-adapter.sql) file to create the Adapter Script and the UDFs.
+
+You can now use the Virtual Schema as follows:
+```sql
+-- Create the virtual schema pointing to your Redis server
+CREATE VIRTUAL SCHEMA redis USING adapter.redis_adapter WITH
+    REDIS_HOST = 'localhost'
+    REDIS_PORT = '6379';
+
+-- This will create a virtual table KEY_VALS, with a key and a value column
+DESCRIBE KEY_VALS;
+
+-- The recommended way is to query by key.
+-- The Adapter supports pushdown for filters like KEY = 'value'.
+-- This gets pushed down as a Redis "get" operation, which is extremely fast
+SELECT * FROM key_vals WHERE KEY = 'foo';
+
+-- This will run a scan on Redis, which is pretty slow
+SELECT * FROM key_vals;
+
+-- This also gets pushed down as a scan, and the filter happens in the database.
+-- It could easily be improved by adding the FN_PRED_LIKE capability and pushing it down to Redis, which offers pattern-based search.
+SELECT * FROM key_vals WHERE KEY like 'foo%';
+
+-- The projection is not pushed down either, so there is a little overhead here
+SELECT "VALUE" FROM key_vals WHERE KEY = 'foo';
+
+-- We can also change or add properties; here we change the pushdown behaviour
+ALTER VIRTUAL SCHEMA redis SET DISABLE_SCAN='TRUE';
+
+-- Now all queries where the KEY = 'VALUE' filter cannot be pushed down will fail.
+-- We could also change the behaviour and return an empty table instead, but that would be less intuitive for the user
+SELECT * FROM key_vals;
+
+```
diff --git a/python-redis-demo-adapter/python-redis-adapter.sql b/python-redis-demo-adapter/python-redis-adapter.sql
new file mode 100644
index 000000000..819199f03
--- /dev/null
+++ b/python-redis-demo-adapter/python-redis-adapter.sql
@@ -0,0 +1,100 @@
+--
+-- Minimalistic Python Redis Adapter, using UDFs for Pushdown
+--
+CREATE SCHEMA adapter;
+
+CREATE OR REPLACE PYTHON ADAPTER SCRIPT adapter.redis_adapter AS
+
+import json
+
+def adapter_call(request):
+    root = json.loads(request)
+    if root["type"] == "createVirtualSchema":
+        return handleCreateVSchema(root)
+    elif root["type"] == "dropVirtualSchema":
+        return json.dumps({"type": "dropVirtualSchema"}).encode('utf-8')
+    elif root["type"] == "refresh":
+        return json.dumps({"type": "refresh"}).encode('utf-8')
+    elif root["type"] == "setProperties":
+        return json.dumps({"type": "setProperties"}).encode('utf-8')
+    elif root["type"] == "getCapabilities":
+        return json.dumps({
+            "type": "getCapabilities",
+            "capabilities": ["FILTER_EXPRESSIONS","LITERAL_STRING","FN_PRED_EQUAL"]
+            }).encode('utf-8') # database expects a utf-8 encoded string of type str. unicode is not yet supported.
+    elif root["type"] == "pushdown":
+        return handlePushdown(root)
+    else:
+        raise ValueError('Unsupported callback')
+
+def handleCreateVSchema(root):
+    res = {
+        "type": "createVirtualSchema",
+        "schemaMetadata": {
+            "tables": [
+            {
+                "name": "KEY_VALS",
+                "columns": [{
+                    "name": "KEY",
+                    "dataType": {"type": "VARCHAR", "size": 2000000}
+                },{
+                    "name": "VALUE",
+                    "dataType": {"type": "VARCHAR", "size": 2000000}
+                }]
+            }]
+        }
+    }
+    return json.dumps(res).encode('utf-8')
+
+def handlePushdown(root):
+    properties = root["schemaMetadataInfo"]["properties"]
+    host = properties["REDIS_HOST"]
+    port = int(properties["REDIS_PORT"])
+    if "filter" in root["pushdownRequest"]:
+        key = root["pushdownRequest"]["filter"]["right"]["value"]
+        sql = "select adapter.redis_get('%s', %s, '%s')" % (host, port, key)
+    else:
+        if "DISABLE_SCAN" in properties:
+            if properties["DISABLE_SCAN"].lower() == "true":
+                raise RuntimeError('Full scan on redis would be required, but this was deactivated via the DISABLE_SCAN property')
+        sql = "select adapter.redis_scan('%s', %s)" % (host, port)
+    res = {
+        "type": "pushdown",
+        "sql": sql
+    }
+    return json.dumps(res).encode('utf-8')
+/
+
+-- The Adapter uses these UDFs for the pushdown
+CREATE OR REPLACE PYTHON SET SCRIPT adapter.redis_get(host varchar(1000), port int, key varchar(1000)) EMITS (key varchar(2000000), val varchar(2000000)) AS
+import redis
+def run(ctx):
+    r = redis.StrictRedis(host=ctx.host, port=ctx.port, db=0)
+    ctx.emit(ctx.key, r.get(ctx.key))
+/
+
+CREATE OR REPLACE PYTHON SET SCRIPT adapter.redis_scan(host varchar(1000), port int) EMITS (key varchar(2000000), val varchar(2000000)) AS
+import redis
+def run(ctx):
+    r = redis.StrictRedis(host=ctx.host, port=ctx.port, db=0)
+    # Do a full iteration. Ugly, but works :-)
+    # SCAN returns a (cursor, keys) pair; a cursor of 0 means the iteration is complete
+    cursor = 0
+    while True:
+        res = r.scan(cursor)
+        cursor = long(res[0])
+        for key in res[1]:
+            ctx.emit(key, str(r.get(key)))
+        if cursor == 0:
+            break
+/
+
+-- The following UDF is not required, but can be used directly to work with redis
+-- Would be nice to support INSERT INTO for virtual tables :-)
+CREATE OR REPLACE PYTHON SET SCRIPT adapter.redis_set(host varchar(1000), port int, key varchar(1000), val varchar(1000)) EMITS (key varchar(2000000), val varchar(2000000)) AS
+import redis
+def run(ctx):
+    r = redis.StrictRedis(host=ctx.host, port=ctx.port, db=0)
+    r.set(ctx.key, ctx.val)
+    ctx.emit(ctx.key, r.get(ctx.key))
+/
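+
+-- A quick smoke test for the UDFs above (a sketch, not part of the adapter itself):
+-- 'localhost' and 6379 are assumptions, adjust them to your Redis setup. The
+-- invocation style mirrors the pushdown SQL generated by handlePushdown().
+SELECT adapter.redis_set('localhost', 6379, 'foo', 'bar');
+SELECT adapter.redis_get('localhost', 6379, 'foo');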