1. Project Clover database Wed Nov 12 2025 05:07:35 UTC
  2. Package guru.mikelue.foxglove.jdbc

File JdbcDataGenerator.java

 

Coverage histogram

../../../../img/srcFileCovDistChart8.png
75% of files have more coverage

Code metrics

12
69
8
2
284
192
25
0.36
8.62
4
3.12

Classes

Class Line # Actions
JdbcDataGenerator 29 33 0% 16 15
0.64285713 (64.3%)
DataGeneratorWorker 155 36 0% 9 3
0.9361702 (93.6%)
 

Contributing tests

This file is covered by 36 tests.

Source view

1    package guru.mikelue.foxglove.jdbc;
2   
3    import java.sql.Connection;
4    import java.sql.SQLException;
5    import java.util.HashSet;
6    import java.util.List;
7    import java.util.Optional;
8    import java.util.function.Consumer;
9   
10    import javax.sql.DataSource;
11   
12    import org.apache.commons.lang3.Validate;
13    import org.apache.commons.lang3.mutable.MutableInt;
14    import org.slf4j.Logger;
15    import org.slf4j.LoggerFactory;
16   
17    import guru.mikelue.foxglove.ColumnMeta;
18    import guru.mikelue.foxglove.DataGenerator;
19    import guru.mikelue.foxglove.TupleAccessor;
20    import guru.mikelue.foxglove.setting.DataSetting;
21    import guru.mikelue.foxglove.setting.DataSettingInfo;
22    import guru.mikelue.foxglove.setting.LayeredDataSetting;
23   
24    import static guru.mikelue.foxglove.ColumnMeta.Property.AUTO_INCREMENT;
25   
26    /**
27    * JDBC implementation of {@link DataGenerator}.
28    */
 
29    public class JdbcDataGenerator implements DataGenerator<JdbcTableFacet> {
30    /**
31    * The default batch size for insertion of rows.
32    */
33    public final static int DEFAULT_BATCH_SIZE = 1024;
34   
35    private final DataSource dataSource;
36    private final Connection connection;
37    private int batchSize = DEFAULT_BATCH_SIZE;
38   
39    private Optional<DataSettingInfo> dataSetting = Optional.empty();
40    private final MetaDataCache metaDataCache;
41   
42    /**
43    * Uses the given data source as the target database for generated data.
44    *
45    * @param dataSource The data source to use
46    */
 
47  55 toggle public JdbcDataGenerator(DataSource dataSource)
48    {
49  55 this.dataSource = dataSource;
50  55 this.connection = null;
51   
52  55 try (var conn = dataSource.getConnection()) {
53  55 this.metaDataCache = new MetaDataCache(conn);
54    } catch (SQLException e) {
55  0 throw new RuntimeJdbcException(e);
56    }
57    }
58   
59    /**
60    * Uses the database and joins the transaction of the given connection.
61    *
62    * @param connection The connection to use
63    */
 
64  4 toggle public JdbcDataGenerator(Connection connection)
65    {
66  4 this.dataSource = null;
67  4 this.connection = connection;
68   
69  4 this.metaDataCache = new MetaDataCache(connection);
70    }
71   
72    /**
73    * Sets the batch size for insertion of rows.
74    *
75    * @param batchSize The batch size for insertion of rows
76    *
77    * @return This instance
78    */
 
79  0 toggle public JdbcDataGenerator setBatchSize(int batchSize)
80    {
81  0 Validate.isTrue(batchSize > 0, "Batch size must be greater than zero");
82   
83  0 this.batchSize = batchSize;
84   
85  0 return this;
86    }
87   
 
88  58 toggle @Override
89    public int generate(List<JdbcTableFacet> tableFacets)
90    throws RuntimeJdbcException
91    {
92  58 Validate.notEmpty(tableFacets, "At least one table facet must be given");
93   
94  58 final var joinConn = this.connection != null;
95   
96  58 if (joinConn) {
97  4 try {
98  4 metaDataCache.loadMetadata(tableFacets, this.connection);
99   
100  4 return new DataGeneratorWorker(
101    tableFacets, metaDataCache, dataSetting,
102    new TransactionGear(
103    this.connection, batchSize, true
104    )
105    )
106    .generate();
107    } catch (Exception e) {
108  0 try {
109  0 this.connection.rollback();
110    } catch (SQLException rollbackEx) {
111  0 throw new RuntimeJdbcException(e);
112    }
113   
114  0 throw new RuntimeJdbcException(e);
115    }
116    }
117   
118  54 try (var currentConn = this.dataSource.getConnection()) {
119  54 metaDataCache.loadMetadata(tableFacets, currentConn);
120   
121  54 try {
122  54 return new DataGeneratorWorker(
123    tableFacets, metaDataCache, dataSetting,
124    new TransactionGear(
125    currentConn, batchSize, false
126    )
127    ).generate();
128    } catch (Exception e) {
129  0 if (!currentConn.getAutoCommit()) {
130  0 currentConn.rollback();
131    }
132  0 throw e;
133    }
134    } catch (Exception e) {
135  0 throw new RuntimeJdbcException(e);
136    }
137    }
138   
139    /**
140    * This object has lower priority than table facet's own setting, yet
141    * has higher priority than {@link DataSetting#defaults()}.
142    *
143    * @param setting The setting to use
144    *
145    * @return This instance
146    */
 
147  1 toggle @Override
148    public DataGenerator<JdbcTableFacet> withSetting(DataSettingInfo setting)
149    {
150  1 dataSetting = Optional.of(setting);
151  1 return this;
152    }
153    }
154   
 
155    class DataGeneratorWorker {
156    private Logger logger = LoggerFactory.getLogger(DataGeneratorWorker.class);
157   
158    private final List<JdbcTableFacet> facetOfTables;
159    private final DataSettingInfo dataSetting;
160    private final TransactionGear transactionGear;
161    private final MetaDataCache metaDataCache;
162   
 
163  58 toggle DataGeneratorWorker(
164    List<JdbcTableFacet> facetOfTables,
165    MetaDataCache metaDataCache,
166    Optional<DataSettingInfo> dataSetting,
167    TransactionGear transactionGear
168    ) {
169  58 this.facetOfTables = facetOfTables;
170  58 this.metaDataCache = metaDataCache;
171  58 this.dataSetting = dataSetting.orElse(null);
172  58 this.transactionGear = transactionGear;
173    }
174   
 
175  58 toggle int generate() throws SQLException
176    {
177  58 int totalRowsGenerated = 0;
178   
179  58 try (var txWorker = new JdbcTxWorker(transactionGear)) {
180  58 for (var table: facetOfTables) {
181  88 var layeredDataSetting = new LayeredDataSetting(
182    table.getSetting().orElse(null),
183    dataSetting
184    );
185   
186  88 totalRowsGenerated += doInsertForTable(
187    txWorker, table, layeredDataSetting
188    );
189    }
190    }
191   
192  58 return totalRowsGenerated;
193    }
194   
195    /**
196    * Responsible for:
197    *
198    * 1. Determining the columns to be generated
199    * 2. Building the SQL statement
200    * 3. Building the row parameter generator
201    */
 
202  88 toggle private int doInsertForTable(
203    JdbcTxWorker txWorker, JdbcTableFacet table,
204    DataSettingInfo setting
205    ) throws SQLException
206    {
207  88 var metaOfColumns = metaDataCache.getMetaOfColumns(table.tableName());
208  88 if (metaOfColumns.isEmpty()) {
209  0 throw new RuntimeJdbcException("No column meta data found: " + table.tableName());
210    }
211   
212    /*
213    * Determines the target columns for data generation
214    */
215  88 var targetColumns = MetaUtils.filterColumns(
216    metaOfColumns, setting, table
217    );
218    // :~)
219   
220  88 var targetColumnsSet = new HashSet<>(targetColumns);
221  88 var namesOfGeneratedColumns = metaOfColumns.stream()
222    .filter(meta -> {
223    /*
224    * Excludes the columns should be generated by Foxglove.
225    */
226  1049 if (targetColumnsSet.contains(meta)) {
227  761 return false;
228    }
229    // :~)
230   
231  288 var properties = meta.properties();
232  288 return properties.contains(AUTO_INCREMENT);
233    })
234    .map(ColumnMeta::name)
235    .toArray(String[]::new);
236   
237    // Builds the insertion SQL
238  88 var sql = MetaUtils.buildInsertSql(
239    transactionGear.connection().getMetaData(),
240    table.tableName(), targetColumns
241    );
242   
243    /*
244    * Builds the row parameter generator
245    */
246  88 var rowGenerator = new RowParamsGenerator(table, targetColumns, setting);
247  88 if (logger.isDebugEnabled()) {
248  88 logger.debug(
249    "Generating data for table: {}({})",
250    table.tableName(),
251    targetColumns.stream()
252    .map(c -> String.format("%s(%s)", c.name(), c.typeName()))
253    .toList()
254    );
255    }
256    // :~)
257   
258  88 Consumer<TupleAccessor> tupleHandler = table.getHandlerOfTuple() != null ?
259    table.getHandlerOfTuple() : (t -> {});
260  88 var numberOfGeneratedRow = new MutableInt(0);
261  88 var valueTomb = table.getValueTomb();
262  88 var tupleSchema = new TupleAccessorImpl.TupleSchema(targetColumns);
263   
264  88 return txWorker.performInsert(
265    new JdbcTxWorker.InsertionContext(
266    sql, table.getNumberOfRows(), namesOfGeneratedColumns,
267    () -> {
268  1359 var newRow = rowGenerator.generateRowParams();
269  1359 var newTuple = tupleSchema.createTupleAccessor(
270    newRow, numberOfGeneratedRow.intValue()
271    );
272   
273  1359 tupleHandler.accept(newTuple);
274   
275  1359 numberOfGeneratedRow.increment();
276  1359 valueTomb.preserveProtoData(newTuple);
277   
278  1359 return newTuple.asMap();
279    }
280    ),
281    valueTomb::preserveAfterData
282    );
283    }
284    }