Modifier and Type | Field and Description |
---|---|
protected org.apache.paimon.shade.caffeine2.com.github.benmanes.caffeine.cache.Cache<Identifier,Table> | CachingCatalog.tableCache |
Modifier and Type | Method and Description |
---|---|
protected Table | AbstractCatalog.getDataOrFormatTable(Identifier identifier) |
Table | DelegateCatalog.getTable(Identifier identifier) |
Table | Catalog.getTable(Identifier identifier) - Return a Table identified by the given Identifier. |
Table | AbstractCatalog.getTable(Identifier identifier) |
Table | CachingCatalog.getTable(Identifier identifier) |
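A minimal sketch of the Catalog.getTable lookup listed above, assuming a Catalog instance is already available; the database and table names are illustrative.

```java
import org.apache.paimon.catalog.Catalog;
import org.apache.paimon.catalog.Identifier;
import org.apache.paimon.table.Table;

public class GetTableExample {
    static Table lookup(Catalog catalog) throws Catalog.TableNotExistException {
        // Identifier pairs a database name with a table name.
        Identifier identifier = Identifier.create("my_db", "my_table");
        // Returns a Table identified by the given Identifier (see Catalog.getTable above).
        return catalog.getTable(identifier);
    }
}
```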
Constructor and Description |
---|
GlobalIndexAssigner(Table table) |
IndexBootstrap(Table table) |
Modifier and Type | Method and Description |
---|---|
Table | SystemCatalogTable.table() |
Table | DataCatalogTable.table() |
Constructor and Description |
---|
DataCatalogTable(Table table, org.apache.flink.table.api.TableSchema tableSchema, List<String> partitionKeys, Map<String,String> properties, String comment, Map<String,String> nonPhysicalColumnComments) |
SystemCatalogTable(Table table) |
Modifier and Type | Field and Description |
---|---|
protected Table | TableActionBase.table |
Modifier and Type | Class and Description |
---|---|
class | LookupFileStoreTable - FileStoreTable for lookup table. |
Modifier and Type | Method and Description |
---|---|
static DynamicPartitionLoader | DynamicPartitionLoader.of(Table table) |
Constructor and Description |
---|
FileStoreLookupFunction(Table table, int[] projection, int[] joinKeyIndex, Predicate predicate) |
Modifier and Type | Method and Description |
---|---|
protected Table | ProcedureBase.table(String tableId) |
Constructor and Description |
---|
RemoteTableQuery(Table table) |
Modifier and Type | Method and Description |
---|---|
static org.apache.flink.streaming.api.datastream.DataStream<InternalRow> | QueryFileMonitor.build(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment env, Table table) |
static void | QueryService.build(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment env, Table table, int parallelism) |
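A minimal sketch of wiring QueryService.build into a Flink job, assuming a Table obtained from a catalog; the package path, parallelism value, and job name are assumptions for illustration only.

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.paimon.flink.service.QueryService;
import org.apache.paimon.table.Table;

public class QueryServiceExample {
    static void start(Table table) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Builds the query service topology for the given table (signature from the table above).
        QueryService.build(env, table, 2); // parallelism of 2 is illustrative
        env.execute("paimon-query-service"); // job name is illustrative
    }
}
```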
Constructor and Description |
---|
QueryAddressRegister(Table table) |
QueryExecutorOperator(Table table) |
QueryFileMonitor(Table table) |
Modifier and Type | Field and Description |
---|---|
protected Table | FlinkTableSinkBase.table |
Modifier and Type | Method and Description |
---|---|
protected HashBucketAssignerOperator<T> | DynamicBucketSink.createHashBucketAssignerOperator(String commitUser, Table table, Integer numAssigners, SerializableFunction<TableSchema,PartitionKeyExtractor<T>> extractorFunction, boolean overwrite) |
Constructor and Description |
---|
FlinkSinkBuilder(Table table) |
FlinkTableSink(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier, Table table, org.apache.flink.table.factories.DynamicTableFactory.Context context, LogStoreTableFactory logStoreTableFactory) |
FlinkTableSinkBase(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier, Table table, org.apache.flink.table.factories.DynamicTableFactory.Context context, LogStoreTableFactory logStoreTableFactory) |
HashBucketAssignerOperator(String commitUser, Table table, Integer numAssigners, SerializableFunction<TableSchema,PartitionKeyExtractor<T>> extractorFunction, boolean overwrite) |
LogFlinkSinkBuilder(Table table) |
SortCompactSinkBuilder(Table table) |
SupportsRowLevelOperationFlinkTableSink(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier, Table table, org.apache.flink.table.factories.DynamicTableFactory.Context context, LogStoreTableFactory logStoreTableFactory) |
Modifier and Type | Method and Description |
---|---|
RichCdcSinkBuilder | RichCdcSinkBuilder.withTable(Table table) - Deprecated. Use the constructor to pass the Table (see the sketch after the constructor table below). |
CdcSinkBuilder<T> | CdcSinkBuilder.withTable(Table table) |
Constructor and Description |
---|
RichCdcSinkBuilder(Table table) |
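A minimal sketch of the deprecation note above: pass the Table through the RichCdcSinkBuilder constructor instead of the deprecated withTable. The package path is assumed, and all other builder configuration is omitted.

```java
import org.apache.paimon.flink.sink.cdc.RichCdcSinkBuilder;
import org.apache.paimon.table.Table;

class RichCdcSinkBuilderExample {
    static RichCdcSinkBuilder newBuilder(Table table) {
        // Preferred: pass the Table via the constructor; withTable(Table) is deprecated.
        return new RichCdcSinkBuilder(table);
    }
}
```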
Modifier and Type | Method and Description |
---|---|
static GlobalIndexAssignerOperator | GlobalIndexAssignerOperator.forRowData(Table table) |
Modifier and Type | Field and Description |
---|---|
protected Table | FlinkTableSource.table |
Modifier and Type | Method and Description |
---|---|
Table | FlinkTableSource.getTable() |
Modifier and Type | Method and Description |
---|---|
static FlinkSource | LogHybridSourceFactory.buildHybridFirstSource(Table table, int[][] projectedFields, Predicate predicate) |
protected FileStoreLookupFunction | BaseDataTableSource.getFileStoreLookupFunction(org.apache.flink.table.connector.source.LookupTableSource.LookupContext context, Table table, int[] projection, int[] joinKey) |
Constructor and Description |
---|
BaseDataTableSource(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier, Table table, boolean streaming, org.apache.flink.table.factories.DynamicTableFactory.Context context, LogStoreTableFactory logStoreTableFactory, Predicate predicate, int[][] projectFields, Long limit, org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy, boolean isBatchCountStar) |
DataTableSource(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier, Table table, boolean streaming, org.apache.flink.table.factories.DynamicTableFactory.Context context, LogStoreTableFactory logStoreTableFactory) |
DataTableSource(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier, Table table, boolean streaming, org.apache.flink.table.factories.DynamicTableFactory.Context context, LogStoreTableFactory logStoreTableFactory, Predicate predicate, int[][] projectFields, Long limit, org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy, List<String> dynamicPartitionFilteringFields, boolean isBatchCountStar) |
FlinkSourceBuilder(Table table) |
FlinkTableSource(Table table) |
FlinkTableSource(Table table, Predicate predicate, int[][] projectFields, Long limit) |
SystemTableSource(Table table, boolean isStreamingMode, org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier) |
SystemTableSource(Table table, boolean isStreamingMode, Predicate predicate, int[][] projectFields, Long limit, int splitBatchSize, FlinkConnectorOptions.SplitAssignMode splitAssignMode, org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier) |
Modifier and Type | Method and Description |
---|---|
static void | TableScanUtils.streamingReadingValidate(Table table) |
static boolean | TableScanUtils.supportCompactDiffStreamingReading(Table table) - Check whether streaming reading is supported based on the data changes before and after compaction. |
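A minimal sketch of the two utilities above, assuming table is an existing Paimon Table; the package path and the exact validation behavior of streamingReadingValidate are assumptions.

```java
import org.apache.paimon.flink.utils.TableScanUtils;
import org.apache.paimon.table.Table;

class StreamingReadCheck {
    static boolean check(Table table) {
        // Validates that the table's options allow streaming reads
        // (assumed to throw an unchecked exception otherwise).
        TableScanUtils.streamingReadingValidate(table);
        // True when streaming reads can rely on the data diff produced by compaction.
        return TableScanUtils.supportCompactDiffStreamingReading(table);
    }
}
```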
Modifier and Type | Method and Description |
---|---|
Table | HiveCatalog.getDataOrFormatTable(Identifier identifier) |
Modifier and Type | Method and Description |
---|---|
static List<DataFileMeta> | FileMetaUtils.construct(FileIO fileIO, String format, String location, Table paimonTable, java.util.function.Predicate<FileStatus> filter, Path dir, Map<Path,Path> rollback) |
Modifier and Type | Class and Description |
---|---|
class | PrivilegedFileStoreTable - FileStoreTable with privilege checks. |
class | PrivilegedObjectTable |
Modifier and Type | Method and Description |
---|---|
Table | PrivilegedCatalog.getTable(Identifier identifier) |
Modifier and Type | Method and Description |
---|---|
static RowType | SparkTypeUtils.toPartitionType(Table table) |
static org.apache.spark.sql.types.StructType | SparkTypeUtils.toSparkPartitionType(Table table) |
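A minimal sketch of the two conversions above, assuming table is a partitioned Paimon Table; the package path of SparkTypeUtils is assumed.

```java
import org.apache.paimon.spark.SparkTypeUtils;
import org.apache.paimon.table.Table;
import org.apache.paimon.types.RowType;
import org.apache.spark.sql.types.StructType;

class PartitionTypeExample {
    static StructType sparkPartitionSchema(Table table) {
        // Paimon's own view of the partition columns (shown for comparison).
        RowType paimonPartitionType = SparkTypeUtils.toPartitionType(table);
        // The same partition columns expressed as a Spark StructType.
        return SparkTypeUtils.toSparkPartitionType(table);
    }
}
```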
Modifier and Type | Interface and Description |
---|---|
interface | DataTable - A Table for data. |
interface | FileStoreTable - An abstraction layer above FileStore to provide reading and writing of InternalRow. |
interface | FormatTable - A file format table refers to a directory that contains multiple files of the same format; operations on this table allow reading from or writing to these files, facilitating the retrieval of existing data and the addition of new files. |
interface | InnerTable - Inner table for implementation, providing newScan, newRead, and so on. |
interface | ReadonlyTable - Readonly table which only provides implementations for scan and read. |
Modifier and Type | Class and Description |
---|---|
class | DelegatedFileStoreTable - Delegated FileStoreTable. |
class | FallbackReadFileStoreTable - A FileStoreTable which mainly reads from the current branch. |
static class | FormatTable.FormatTableImpl - An implementation for FormatTable. |
class | KnownSplitsTable - A table to hold some known data splits. |
Modifier and Type | Method and Description |
---|---|
Table | Table.copy(Map<String,String> dynamicOptions) - Copy this table, adding dynamic options. |
Table | KnownSplitsTable.copy(Map<String,String> dynamicOptions) |
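A minimal sketch of Table.copy, assuming table is an existing Table; scan.snapshot-id is a real Paimon scan option, but the key and value used here are only illustrative.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.paimon.table.Table;

class CopyWithOptionsExample {
    static Table pinToSnapshot(Table table) {
        // Dynamic options are applied to the returned copy; the original table is untouched.
        Map<String, String> dynamicOptions = new HashMap<>();
        dynamicOptions.put("scan.snapshot-id", "5"); // illustrative key/value
        return table.copy(dynamicOptions);
    }
}
```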
Modifier and Type | Interface and Description |
---|---|
interface | ObjectTable - An object table refers to a directory that contains multiple objects (files); the object table provides metadata indexes for the unstructured data objects in this directory. |
Modifier and Type | Class and Description |
---|---|
static class | ObjectTable.ObjectTableImpl - An implementation for ObjectTable. |
Modifier and Type | Class and Description |
---|---|
class | AggregationFieldsTable - A Table for showing the aggregation fields of a table. |
class | AllTableOptionsTable - This is a system table to display all the database-table properties. |
class | AuditLogTable - A Table for reading the audit log of a table. |
class | BinlogTable - A Table for reading the binlog of a table. |
class | BranchesTable - A Table for showing the branches of a table. |
class | BucketsTable - A Table for showing bucket info. |
class | CatalogOptionsTable - This is a system Table to display catalog options. |
class | CompactBucketsTable - A table to produce modified partitions and buckets (also including files in streaming mode) for each snapshot. |
class | ConsumersTable - A Table for showing the consumers of a table. |
class | FileMonitorTable - A table to produce modified files for snapshots. |
class | FilesTable - A Table for showing the files of a snapshot in a specific table. |
class | ManifestsTable - A Table for showing the committing snapshots of a table. |
class | OptionsTable - A Table for showing the options of a table. |
class | PartitionsTable - A Table for showing partition info. |
class | ReadOptimizedTable - A Table optimized for reading by avoiding merging files. |
class | SchemasTable - A Table for showing the schemas of a table. |
class | SinkTableLineageTable - This is a system table to display all the sink table lineages. |
class | SnapshotsTable - A Table for showing the committing snapshots of a table. |
class | SourceTableLineageTable - This is a system table to display all the source table lineages. |
class | StatisticTable - A Table for showing the statistics of a table. |
class | TableLineageTable - Base lineage table for source and sink table lineage. |
class | TagsTable - A Table for showing the tags of a table. |
Modifier and Type | Field and Description |
---|---|
static Map<String,java.util.function.Function<FileStoreTable,Table>> | SystemTableLoader.SYSTEM_TABLE_LOADERS |
Modifier and Type | Method and Description |
---|---|
Table | BinlogTable.copy(Map<String,String> dynamicOptions) |
Table | FilesTable.copy(Map<String,String> dynamicOptions) |
Table | SchemasTable.copy(Map<String,String> dynamicOptions) |
Table | OptionsTable.copy(Map<String,String> dynamicOptions) |
Table | SnapshotsTable.copy(Map<String,String> dynamicOptions) |
Table | TagsTable.copy(Map<String,String> dynamicOptions) |
Table | CatalogOptionsTable.copy(Map<String,String> dynamicOptions) |
Table | ManifestsTable.copy(Map<String,String> dynamicOptions) |
Table | AuditLogTable.copy(Map<String,String> dynamicOptions) |
Table | StatisticTable.copy(Map<String,String> dynamicOptions) |
Table | AllTableOptionsTable.copy(Map<String,String> dynamicOptions) |
Table | SinkTableLineageTable.copy(Map<String,String> dynamicOptions) |
Table | ReadOptimizedTable.copy(Map<String,String> dynamicOptions) |
Table | BucketsTable.copy(Map<String,String> dynamicOptions) |
Table | ConsumersTable.copy(Map<String,String> dynamicOptions) |
Table | BranchesTable.copy(Map<String,String> dynamicOptions) |
Table | SourceTableLineageTable.copy(Map<String,String> dynamicOptions) |
Table | PartitionsTable.copy(Map<String,String> dynamicOptions) |
Table | AggregationFieldsTable.copy(Map<String,String> dynamicOptions) |
static Table | SystemTableLoader.load(String type, FileStoreTable dataTable) |
static Table | SystemTableLoader.loadGlobal(String tableName, FileIO fileIO, java.util.function.Supplier<Map<String,Map<String,Path>>> allTablePaths, Options catalogOptions, LineageMetaFactory lineageMetaFactory) |
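A minimal sketch of SystemTableLoader.load, assuming dataTable is a FileStoreTable; "audit_log" is one of Paimon's system table names and is used here only as an illustrative type.

```java
import org.apache.paimon.table.FileStoreTable;
import org.apache.paimon.table.Table;
import org.apache.paimon.table.system.SystemTableLoader;

class SystemTableExample {
    static Table auditLog(FileStoreTable dataTable) {
        // Resolves a system table wrapper for the given data table;
        // unknown types are assumed to yield null.
        return SystemTableLoader.load("audit_log", dataTable);
    }
}
```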
Copyright © 2023–2024 The Apache Software Foundation. All rights reserved.